| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --filter-out "// kill:.*$" --version 4 |
| ; RUN: llc -force-streaming -verify-machineinstrs < %s | FileCheck %s |
| |
| target triple = "aarch64-linux" |
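
; This test covers lowering of the SME2 multi-vector FMLA/FMLS intrinsics to
; the (B)FMLA/(B)FMLS ZA.H instructions. Each intrinsic is exercised twice,
; with slice offsets 0 and 7, the two ends of the 3-bit immediate offset
; range.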
| |
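; f16 FMLA/FMLS, multi-vector by single vector.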
| define void @test_fmla_f16_vg2_single(i32 %slice, <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, <vscale x 8 x half> %b) #0 { |
| ; CHECK-LABEL: test_fmla_f16_vg2_single: |
| ; CHECK: // %bb.0: |
| ; CHECK: mov w8, w0 |
| ; CHECK: fmla za.h[w8, 0, vgx2], { z0.h, z1.h }, z2.h |
| ; CHECK: fmla za.h[w8, 7, vgx2], { z0.h, z1.h }, z2.h |
| ; CHECK: ret |
| call void @llvm.aarch64.sme.fmla.single.vg1x2.nxv8f16(i32 %slice, <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, <vscale x 8 x half> %b) |
| %slice.7 = add i32 %slice, 7 |
| call void @llvm.aarch64.sme.fmla.single.vg1x2.nxv8f16(i32 %slice.7, <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, <vscale x 8 x half> %b) |
| ret void |
| } |
| |
| define void @test_fmla_f16_vg4_single(i32 %slice, <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, |
| ; CHECK-LABEL: test_fmla_f16_vg4_single: |
| ; CHECK: // %bb.0: |
| ; CHECK: mov w8, w0 |
| ; CHECK: fmla za.h[w8, 0, vgx4], { z0.h - z3.h }, z4.h |
| ; CHECK: fmla za.h[w8, 7, vgx4], { z0.h - z3.h }, z4.h |
| ; CHECK: ret |
| <vscale x 8 x half> %a2, <vscale x 8 x half> %a3, <vscale x 8 x half> %b) #0 { |
| call void @llvm.aarch64.sme.fmla.single.vg1x4.nxv8f16(i32 %slice, <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, |
| <vscale x 8 x half> %a2, <vscale x 8 x half> %a3, <vscale x 8 x half> %b) |
| %slice.7 = add i32 %slice, 7 |
| call void @llvm.aarch64.sme.fmla.single.vg1x4.nxv8f16(i32 %slice.7, <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, |
| <vscale x 8 x half> %a2, <vscale x 8 x half> %a3, <vscale x 8 x half> %b) |
| ret void |
| } |
| |
| define void @test_fmls_f16_vg2_single(i32 %slice, <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, <vscale x 8 x half> %b) #0 { |
| ; CHECK-LABEL: test_fmls_f16_vg2_single: |
| ; CHECK: // %bb.0: |
| ; CHECK: mov w8, w0 |
| ; CHECK: fmls za.h[w8, 0, vgx2], { z0.h, z1.h }, z2.h |
| ; CHECK: fmls za.h[w8, 7, vgx2], { z0.h, z1.h }, z2.h |
| ; CHECK: ret |
| call void @llvm.aarch64.sme.fmls.single.vg1x2.nxv8f16(i32 %slice, <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, <vscale x 8 x half> %b) |
| %slice.7 = add i32 %slice, 7 |
| call void @llvm.aarch64.sme.fmls.single.vg1x2.nxv8f16(i32 %slice.7, <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, <vscale x 8 x half> %b) |
| ret void |
| } |
| |
| define void @test_fmls_f16_vg4_single(i32 %slice, <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, |
| ; CHECK-LABEL: test_fmls_f16_vg4_single: |
| ; CHECK: // %bb.0: |
| ; CHECK: mov w8, w0 |
| ; CHECK: fmls za.h[w8, 0, vgx4], { z0.h - z3.h }, z4.h |
| ; CHECK: fmls za.h[w8, 7, vgx4], { z0.h - z3.h }, z4.h |
| ; CHECK: ret |
| <vscale x 8 x half> %a2, <vscale x 8 x half> %a3, <vscale x 8 x half> %b) #0 { |
| call void @llvm.aarch64.sme.fmls.single.vg1x4.nxv8f16(i32 %slice, <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, |
| <vscale x 8 x half> %a2, <vscale x 8 x half> %a3, <vscale x 8 x half> %b) |
| %slice.7 = add i32 %slice, 7 |
| call void @llvm.aarch64.sme.fmls.single.vg1x4.nxv8f16(i32 %slice.7, <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, |
| <vscale x 8 x half> %a2, <vscale x 8 x half> %a3, <vscale x 8 x half> %b) |
| ret void |
| } |
| |
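; f16 FMLA/FMLS, multi-vector by multi-vector.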
| define void @test_fmla_f16_vg2_multi(i32 %slice, |
| ; CHECK-LABEL: test_fmla_f16_vg2_multi: |
| ; CHECK: // %bb.0: |
| ; CHECK: mov w8, w0 |
| ; CHECK: fmla za.h[w8, 0, vgx2], { z0.h, z1.h }, { z2.h, z3.h } |
| ; CHECK: fmla za.h[w8, 7, vgx2], { z0.h, z1.h }, { z2.h, z3.h } |
| ; CHECK: ret |
| <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, |
| <vscale x 8 x half> %b0, <vscale x 8 x half> %b1) #0 { |
| call void @llvm.aarch64.sme.fmla.vg1x2.nxv8f16(i32 %slice, |
| <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, |
| <vscale x 8 x half> %b0, <vscale x 8 x half> %b1) |
| %slice.7 = add i32 %slice, 7 |
| call void @llvm.aarch64.sme.fmla.vg1x2.nxv8f16(i32 %slice.7, |
| <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, |
| <vscale x 8 x half> %b0, <vscale x 8 x half> %b1) |
| ret void |
| } |
| |
| define void @test_fmla_f16_vg4_multi(i32 %slice, |
| ; CHECK-LABEL: test_fmla_f16_vg4_multi: |
| ; CHECK: // %bb.0: |
| ; CHECK: mov w8, w0 |
| ; CHECK: fmla za.h[w8, 0, vgx4], { z0.h - z3.h }, { z4.h - z7.h } |
| ; CHECK: fmla za.h[w8, 7, vgx4], { z0.h - z3.h }, { z4.h - z7.h } |
| ; CHECK: ret |
| <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, |
| <vscale x 8 x half> %a2, <vscale x 8 x half> %a3, |
| <vscale x 8 x half> %b0, <vscale x 8 x half> %b1, |
| <vscale x 8 x half> %b2, <vscale x 8 x half> %b3) #0 { |
| call void @llvm.aarch64.sme.fmla.vg1x4.nxv8f16(i32 %slice, |
| <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, |
| <vscale x 8 x half> %a2, <vscale x 8 x half> %a3, |
| <vscale x 8 x half> %b0, <vscale x 8 x half> %b1, |
| <vscale x 8 x half> %b2, <vscale x 8 x half> %b3) |
| %slice.7 = add i32 %slice, 7 |
| call void @llvm.aarch64.sme.fmla.vg1x4.nxv8f16(i32 %slice.7, |
| <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, |
| <vscale x 8 x half> %a2, <vscale x 8 x half> %a3, |
| <vscale x 8 x half> %b0, <vscale x 8 x half> %b1, |
| <vscale x 8 x half> %b2, <vscale x 8 x half> %b3) |
| ret void |
| } |
| |
| define void @test_fmls_f16_vg2_multi(i32 %slice, |
| ; CHECK-LABEL: test_fmls_f16_vg2_multi: |
| ; CHECK: // %bb.0: |
| ; CHECK: mov w8, w0 |
| ; CHECK: fmls za.h[w8, 0, vgx2], { z0.h, z1.h }, { z2.h, z3.h } |
| ; CHECK: fmls za.h[w8, 7, vgx2], { z0.h, z1.h }, { z2.h, z3.h } |
| ; CHECK: ret |
| <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, |
| <vscale x 8 x half> %b0, <vscale x 8 x half> %b1) #0 { |
| call void @llvm.aarch64.sme.fmls.vg1x2.nxv8f16(i32 %slice, |
| <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, |
| <vscale x 8 x half> %b0, <vscale x 8 x half> %b1) |
| %slice.7 = add i32 %slice, 7 |
| call void @llvm.aarch64.sme.fmls.vg1x2.nxv8f16(i32 %slice.7, |
| <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, |
| <vscale x 8 x half> %b0, <vscale x 8 x half> %b1) |
| ret void |
| } |
| |
| define void @test_fmls_f16_vg4_multi(i32 %slice, |
| ; CHECK-LABEL: test_fmls_f16_vg4_multi: |
| ; CHECK: // %bb.0: |
| ; CHECK: mov w8, w0 |
| ; CHECK: fmls za.h[w8, 0, vgx4], { z0.h - z3.h }, { z4.h - z7.h } |
| ; CHECK: fmls za.h[w8, 7, vgx4], { z0.h - z3.h }, { z4.h - z7.h } |
| ; CHECK: ret |
| <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, |
| <vscale x 8 x half> %a2, <vscale x 8 x half> %a3, |
| <vscale x 8 x half> %b0, <vscale x 8 x half> %b1, |
| <vscale x 8 x half> %b2, <vscale x 8 x half> %b3) #0 { |
| call void @llvm.aarch64.sme.fmls.vg1x4.nxv8f16(i32 %slice, |
| <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, |
| <vscale x 8 x half> %a2, <vscale x 8 x half> %a3, |
| <vscale x 8 x half> %b0, <vscale x 8 x half> %b1, |
| <vscale x 8 x half> %b2, <vscale x 8 x half> %b3) |
| %slice.7 = add i32 %slice, 7 |
| call void @llvm.aarch64.sme.fmls.vg1x4.nxv8f16(i32 %slice.7, |
| <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, |
| <vscale x 8 x half> %a2, <vscale x 8 x half> %a3, |
| <vscale x 8 x half> %b0, <vscale x 8 x half> %b1, |
| <vscale x 8 x half> %b2, <vscale x 8 x half> %b3) |
| ret void |
| } |
| |
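; f16 FMLA/FMLS, multi-vector by indexed vector element (lane form).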
| define void @test_fmla_f16_vg2_index(i32 %slice, |
| ; CHECK-LABEL: test_fmla_f16_vg2_index: |
| ; CHECK: // %bb.0: |
| ; CHECK: mov w8, w0 |
| ; CHECK: fmla za.h[w8, 0, vgx2], { z0.h, z1.h }, z2.h[7] |
| ; CHECK: fmla za.h[w8, 7, vgx2], { z0.h, z1.h }, z2.h[7] |
| ; CHECK: ret |
| <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, |
| <vscale x 8 x half> %b) #0 { |
  call void @llvm.aarch64.sme.fmla.lane.vg1x2.nxv8f16(i32 %slice,
                                                      <vscale x 8 x half> %a0, <vscale x 8 x half> %a1,
                                                      <vscale x 8 x half> %b, i32 7)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fmla.lane.vg1x2.nxv8f16(i32 %slice.7,
                                                      <vscale x 8 x half> %a0, <vscale x 8 x half> %a1,
                                                      <vscale x 8 x half> %b, i32 7)
| ret void |
| } |
| |
| define void @test_fmla_f16_vg4_index(i32 %slice, |
| ; CHECK-LABEL: test_fmla_f16_vg4_index: |
| ; CHECK: // %bb.0: |
| ; CHECK: mov w8, w0 |
| ; CHECK: fmla za.h[w8, 0, vgx4], { z0.h - z3.h }, z4.h[7] |
; CHECK: fmla za.h[w8, 7, vgx4], { z0.h - z3.h }, z4.h[7]
| ; CHECK: ret |
| <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, |
| <vscale x 8 x half> %a2, <vscale x 8 x half> %a3, |
| <vscale x 8 x half> %b) #0 { |
  call void @llvm.aarch64.sme.fmla.lane.vg1x4.nxv8f16(i32 %slice,
                                                      <vscale x 8 x half> %a0, <vscale x 8 x half> %a1,
                                                      <vscale x 8 x half> %a2, <vscale x 8 x half> %a3,
                                                      <vscale x 8 x half> %b, i32 7)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fmla.lane.vg1x4.nxv8f16(i32 %slice.7,
                                                      <vscale x 8 x half> %a0, <vscale x 8 x half> %a1,
                                                      <vscale x 8 x half> %a2, <vscale x 8 x half> %a3,
                                                      <vscale x 8 x half> %b, i32 7)
| ret void |
| } |
| |
| define void @test_fmls_f16_vg2_index(i32 %slice, |
| ; CHECK-LABEL: test_fmls_f16_vg2_index: |
| ; CHECK: // %bb.0: |
| ; CHECK: mov w8, w0 |
| ; CHECK: fmls za.h[w8, 0, vgx2], { z0.h, z1.h }, z2.h[7] |
| ; CHECK: fmls za.h[w8, 7, vgx2], { z0.h, z1.h }, z2.h[7] |
| ; CHECK: ret |
| <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, |
| <vscale x 8 x half> %b) #0 { |
  call void @llvm.aarch64.sme.fmls.lane.vg1x2.nxv8f16(i32 %slice,
                                                      <vscale x 8 x half> %a0, <vscale x 8 x half> %a1,
                                                      <vscale x 8 x half> %b, i32 7)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fmls.lane.vg1x2.nxv8f16(i32 %slice.7,
                                                      <vscale x 8 x half> %a0, <vscale x 8 x half> %a1,
                                                      <vscale x 8 x half> %b, i32 7)
| ret void |
| } |
| |
| define void @test_fmls_f16_vg4_index(i32 %slice, |
| ; CHECK-LABEL: test_fmls_f16_vg4_index: |
| ; CHECK: // %bb.0: |
| ; CHECK: mov w8, w0 |
| ; CHECK: fmls za.h[w8, 0, vgx4], { z0.h - z3.h }, z4.h[7] |
; CHECK: fmls za.h[w8, 7, vgx4], { z0.h - z3.h }, z4.h[7]
| ; CHECK: ret |
| <vscale x 8 x half> %a0, <vscale x 8 x half> %a1, |
| <vscale x 8 x half> %a2, <vscale x 8 x half> %a3, |
| <vscale x 8 x half> %b) #0 { |
  call void @llvm.aarch64.sme.fmls.lane.vg1x4.nxv8f16(i32 %slice,
                                                      <vscale x 8 x half> %a0, <vscale x 8 x half> %a1,
                                                      <vscale x 8 x half> %a2, <vscale x 8 x half> %a3,
                                                      <vscale x 8 x half> %b, i32 7)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fmls.lane.vg1x4.nxv8f16(i32 %slice.7,
                                                      <vscale x 8 x half> %a0, <vscale x 8 x half> %a1,
                                                      <vscale x 8 x half> %a2, <vscale x 8 x half> %a3,
                                                      <vscale x 8 x half> %b, i32 7)
| ret void |
| } |
| |
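; bf16 FMLA/FMLS (BFMLA/BFMLS), multi-vector by single vector.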
| define void @test_fmla_bf16_vg2_single(i32 %slice, <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, <vscale x 8 x bfloat> %b) #0 { |
| ; CHECK-LABEL: test_fmla_bf16_vg2_single: |
| ; CHECK: // %bb.0: |
| ; CHECK: mov w8, w0 |
| ; CHECK: bfmla za.h[w8, 0, vgx2], { z0.h, z1.h }, z2.h |
| ; CHECK: bfmla za.h[w8, 7, vgx2], { z0.h, z1.h }, z2.h |
| ; CHECK: ret |
| call void @llvm.aarch64.sme.fmla.single.vg1x2.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, <vscale x 8 x bfloat> %b) |
| %slice.7 = add i32 %slice, 7 |
| call void @llvm.aarch64.sme.fmla.single.vg1x2.nxv8bf16(i32 %slice.7, <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, <vscale x 8 x bfloat> %b) |
| ret void |
| } |
| |
| define void @test_fmla_bf16_vg4_single(i32 %slice, <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, |
| ; CHECK-LABEL: test_fmla_bf16_vg4_single: |
| ; CHECK: // %bb.0: |
| ; CHECK: mov w8, w0 |
| ; CHECK: bfmla za.h[w8, 0, vgx4], { z0.h - z3.h }, z4.h |
| ; CHECK: bfmla za.h[w8, 7, vgx4], { z0.h - z3.h }, z4.h |
| ; CHECK: ret |
| <vscale x 8 x bfloat> %a2, <vscale x 8 x bfloat> %a3, <vscale x 8 x bfloat> %b) #0 { |
| call void @llvm.aarch64.sme.fmla.single.vg1x4.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, |
| <vscale x 8 x bfloat> %a2, <vscale x 8 x bfloat> %a3, <vscale x 8 x bfloat> %b) |
| %slice.7 = add i32 %slice, 7 |
| call void @llvm.aarch64.sme.fmla.single.vg1x4.nxv8bf16(i32 %slice.7, <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, |
| <vscale x 8 x bfloat> %a2, <vscale x 8 x bfloat> %a3, <vscale x 8 x bfloat> %b) |
| ret void |
| } |
| |
| define void @test_fmls_bf16_vg2_single(i32 %slice, <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, <vscale x 8 x bfloat> %b) #0 { |
| ; CHECK-LABEL: test_fmls_bf16_vg2_single: |
| ; CHECK: // %bb.0: |
| ; CHECK: mov w8, w0 |
| ; CHECK: bfmls za.h[w8, 0, vgx2], { z0.h, z1.h }, z2.h |
| ; CHECK: bfmls za.h[w8, 7, vgx2], { z0.h, z1.h }, z2.h |
| ; CHECK: ret |
| call void @llvm.aarch64.sme.fmls.single.vg1x2.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, <vscale x 8 x bfloat> %b) |
| %slice.7 = add i32 %slice, 7 |
| call void @llvm.aarch64.sme.fmls.single.vg1x2.nxv8bf16(i32 %slice.7, <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, <vscale x 8 x bfloat> %b) |
| ret void |
| } |
| |
| define void @test_fmls_bf16_vg4_single(i32 %slice, <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, |
| ; CHECK-LABEL: test_fmls_bf16_vg4_single: |
| ; CHECK: // %bb.0: |
| ; CHECK: mov w8, w0 |
| ; CHECK: bfmls za.h[w8, 0, vgx4], { z0.h - z3.h }, z4.h |
| ; CHECK: bfmls za.h[w8, 7, vgx4], { z0.h - z3.h }, z4.h |
| ; CHECK: ret |
| <vscale x 8 x bfloat> %a2, <vscale x 8 x bfloat> %a3, <vscale x 8 x bfloat> %b) #0 { |
| call void @llvm.aarch64.sme.fmls.single.vg1x4.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, |
| <vscale x 8 x bfloat> %a2, <vscale x 8 x bfloat> %a3, <vscale x 8 x bfloat> %b) |
| %slice.7 = add i32 %slice, 7 |
| call void @llvm.aarch64.sme.fmls.single.vg1x4.nxv8bf16(i32 %slice.7, <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, |
| <vscale x 8 x bfloat> %a2, <vscale x 8 x bfloat> %a3, <vscale x 8 x bfloat> %b) |
| ret void |
| } |
| |
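; bf16 FMLA/FMLS, multi-vector by multi-vector.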
| define void @test_fmla_bf16_vg2_multi(i32 %slice, |
| ; CHECK-LABEL: test_fmla_bf16_vg2_multi: |
| ; CHECK: // %bb.0: |
| ; CHECK: mov w8, w0 |
| ; CHECK: bfmla za.h[w8, 0, vgx2], { z0.h, z1.h }, { z2.h, z3.h } |
| ; CHECK: bfmla za.h[w8, 7, vgx2], { z0.h, z1.h }, { z2.h, z3.h } |
| ; CHECK: ret |
| <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, |
| <vscale x 8 x bfloat> %b0, <vscale x 8 x bfloat> %b1) #0 { |
| call void @llvm.aarch64.sme.fmla.vg1x2.nxv8bf16(i32 %slice, |
| <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, |
| <vscale x 8 x bfloat> %b0, <vscale x 8 x bfloat> %b1) |
| %slice.7 = add i32 %slice, 7 |
| call void @llvm.aarch64.sme.fmla.vg1x2.nxv8bf16(i32 %slice.7, |
| <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, |
| <vscale x 8 x bfloat> %b0, <vscale x 8 x bfloat> %b1) |
| ret void |
| } |
| |
| define void @test_fmla_bf16_vg4_multi(i32 %slice, |
| ; CHECK-LABEL: test_fmla_bf16_vg4_multi: |
| ; CHECK: // %bb.0: |
| ; CHECK: mov w8, w0 |
| ; CHECK: bfmla za.h[w8, 0, vgx4], { z0.h - z3.h }, { z4.h - z7.h } |
| ; CHECK: bfmla za.h[w8, 7, vgx4], { z0.h - z3.h }, { z4.h - z7.h } |
| ; CHECK: ret |
| <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, |
| <vscale x 8 x bfloat> %a2, <vscale x 8 x bfloat> %a3, |
| <vscale x 8 x bfloat> %b0, <vscale x 8 x bfloat> %b1, |
| <vscale x 8 x bfloat> %b2, <vscale x 8 x bfloat> %b3) #0 { |
| call void @llvm.aarch64.sme.fmla.vg1x4.nxv8bf16(i32 %slice, |
| <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, |
| <vscale x 8 x bfloat> %a2, <vscale x 8 x bfloat> %a3, |
| <vscale x 8 x bfloat> %b0, <vscale x 8 x bfloat> %b1, |
| <vscale x 8 x bfloat> %b2, <vscale x 8 x bfloat> %b3) |
| %slice.7 = add i32 %slice, 7 |
| call void @llvm.aarch64.sme.fmla.vg1x4.nxv8bf16(i32 %slice.7, |
| <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, |
| <vscale x 8 x bfloat> %a2, <vscale x 8 x bfloat> %a3, |
| <vscale x 8 x bfloat> %b0, <vscale x 8 x bfloat> %b1, |
| <vscale x 8 x bfloat> %b2, <vscale x 8 x bfloat> %b3) |
| ret void |
| } |
| |
| define void @test_fmls_bf16_vg2_multi(i32 %slice, |
| ; CHECK-LABEL: test_fmls_bf16_vg2_multi: |
| ; CHECK: // %bb.0: |
| ; CHECK: mov w8, w0 |
| ; CHECK: bfmls za.h[w8, 0, vgx2], { z0.h, z1.h }, { z2.h, z3.h } |
| ; CHECK: bfmls za.h[w8, 7, vgx2], { z0.h, z1.h }, { z2.h, z3.h } |
| ; CHECK: ret |
| <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, |
| <vscale x 8 x bfloat> %b0, <vscale x 8 x bfloat> %b1) #0 { |
| call void @llvm.aarch64.sme.fmls.vg1x2.nxv8bf16(i32 %slice, |
| <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, |
| <vscale x 8 x bfloat> %b0, <vscale x 8 x bfloat> %b1) |
| %slice.7 = add i32 %slice, 7 |
| call void @llvm.aarch64.sme.fmls.vg1x2.nxv8bf16(i32 %slice.7, |
| <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, |
| <vscale x 8 x bfloat> %b0, <vscale x 8 x bfloat> %b1) |
| ret void |
| } |
| |
| define void @test_fmls_bf16_vg4_multi(i32 %slice, |
| ; CHECK-LABEL: test_fmls_bf16_vg4_multi: |
| ; CHECK: // %bb.0: |
| ; CHECK: mov w8, w0 |
| ; CHECK: bfmls za.h[w8, 0, vgx4], { z0.h - z3.h }, { z4.h - z7.h } |
| ; CHECK: bfmls za.h[w8, 7, vgx4], { z0.h - z3.h }, { z4.h - z7.h } |
| ; CHECK: ret |
| <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, |
| <vscale x 8 x bfloat> %a2, <vscale x 8 x bfloat> %a3, |
| <vscale x 8 x bfloat> %b0, <vscale x 8 x bfloat> %b1, |
| <vscale x 8 x bfloat> %b2, <vscale x 8 x bfloat> %b3) #0 { |
| call void @llvm.aarch64.sme.fmls.vg1x4.nxv8bf16(i32 %slice, |
| <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, |
| <vscale x 8 x bfloat> %a2, <vscale x 8 x bfloat> %a3, |
| <vscale x 8 x bfloat> %b0, <vscale x 8 x bfloat> %b1, |
| <vscale x 8 x bfloat> %b2, <vscale x 8 x bfloat> %b3) |
| %slice.7 = add i32 %slice, 7 |
| call void @llvm.aarch64.sme.fmls.vg1x4.nxv8bf16(i32 %slice.7, |
| <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, |
| <vscale x 8 x bfloat> %a2, <vscale x 8 x bfloat> %a3, |
| <vscale x 8 x bfloat> %b0, <vscale x 8 x bfloat> %b1, |
| <vscale x 8 x bfloat> %b2, <vscale x 8 x bfloat> %b3) |
| ret void |
| } |
| |
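; bf16 FMLA/FMLS, multi-vector by indexed vector element (lane form).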
| define void @test_fmla_bf16_vg2_index(i32 %slice, |
| ; CHECK-LABEL: test_fmla_bf16_vg2_index: |
| ; CHECK: // %bb.0: |
| ; CHECK: mov w8, w0 |
| ; CHECK: bfmla za.h[w8, 0, vgx2], { z0.h, z1.h }, z2.h[7] |
| ; CHECK: bfmla za.h[w8, 7, vgx2], { z0.h, z1.h }, z2.h[7] |
| ; CHECK: ret |
| <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, |
| <vscale x 8 x bfloat> %b) #0 { |
  call void @llvm.aarch64.sme.fmla.lane.vg1x2.nxv8bf16(i32 %slice,
                                                       <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1,
                                                       <vscale x 8 x bfloat> %b, i32 7)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fmla.lane.vg1x2.nxv8bf16(i32 %slice.7,
                                                       <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1,
                                                       <vscale x 8 x bfloat> %b, i32 7)
| ret void |
| } |
| |
| define void @test_fmla_bf16_vg4_index(i32 %slice, |
| ; CHECK-LABEL: test_fmla_bf16_vg4_index: |
| ; CHECK: // %bb.0: |
| ; CHECK: mov w8, w0 |
| ; CHECK: bfmla za.h[w8, 0, vgx4], { z0.h - z3.h }, z4.h[7] |
; CHECK: bfmla za.h[w8, 7, vgx4], { z0.h - z3.h }, z4.h[7]
| ; CHECK: ret |
| <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, |
| <vscale x 8 x bfloat> %a2, <vscale x 8 x bfloat> %a3, |
| <vscale x 8 x bfloat> %b) #0 { |
  call void @llvm.aarch64.sme.fmla.lane.vg1x4.nxv8bf16(i32 %slice,
                                                       <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1,
                                                       <vscale x 8 x bfloat> %a2, <vscale x 8 x bfloat> %a3,
                                                       <vscale x 8 x bfloat> %b, i32 7)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fmla.lane.vg1x4.nxv8bf16(i32 %slice.7,
                                                       <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1,
                                                       <vscale x 8 x bfloat> %a2, <vscale x 8 x bfloat> %a3,
                                                       <vscale x 8 x bfloat> %b, i32 7)
| ret void |
| } |
| |
| define void @test_fmls_bf16_vg2_index(i32 %slice, |
| ; CHECK-LABEL: test_fmls_bf16_vg2_index: |
| ; CHECK: // %bb.0: |
| ; CHECK: mov w8, w0 |
| ; CHECK: bfmls za.h[w8, 0, vgx2], { z0.h, z1.h }, z2.h[7] |
| ; CHECK: bfmls za.h[w8, 7, vgx2], { z0.h, z1.h }, z2.h[7] |
| ; CHECK: ret |
| <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, |
| <vscale x 8 x bfloat> %b) #0 { |
  call void @llvm.aarch64.sme.fmls.lane.vg1x2.nxv8bf16(i32 %slice,
                                                       <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1,
                                                       <vscale x 8 x bfloat> %b, i32 7)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fmls.lane.vg1x2.nxv8bf16(i32 %slice.7,
                                                       <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1,
                                                       <vscale x 8 x bfloat> %b, i32 7)
| ret void |
| } |
| |
| define void @test_fmls_bf16_vg4_index(i32 %slice, |
| ; CHECK-LABEL: test_fmls_bf16_vg4_index: |
| ; CHECK: // %bb.0: |
| ; CHECK: mov w8, w0 |
| ; CHECK: bfmls za.h[w8, 0, vgx4], { z0.h - z3.h }, z4.h[7] |
; CHECK: bfmls za.h[w8, 7, vgx4], { z0.h - z3.h }, z4.h[7]
| ; CHECK: ret |
| <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1, |
| <vscale x 8 x bfloat> %a2, <vscale x 8 x bfloat> %a3, |
| <vscale x 8 x bfloat> %b) #0 { |
  call void @llvm.aarch64.sme.fmls.lane.vg1x4.nxv8bf16(i32 %slice,
                                                       <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1,
                                                       <vscale x 8 x bfloat> %a2, <vscale x 8 x bfloat> %a3,
                                                       <vscale x 8 x bfloat> %b, i32 7)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fmls.lane.vg1x4.nxv8bf16(i32 %slice.7,
                                                       <vscale x 8 x bfloat> %a0, <vscale x 8 x bfloat> %a1,
                                                       <vscale x 8 x bfloat> %a2, <vscale x 8 x bfloat> %a3,
                                                       <vscale x 8 x bfloat> %b, i32 7)
| ret void |
| } |
| |
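; +sme-f16f16 (FEAT_SME_F16F16) provides the non-widening f16 ZA.H forms and
; +sme-b16b16 (FEAT_SME_B16B16) the bf16 forms tested above.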
| attributes #0 = { nounwind "target-features"="+sme2p1,+sme-f16f16,+sme-b16b16" } |