; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mattr=+sme2 -enable-subreg-liveness < %s | FileCheck %s

target triple = "aarch64"
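
; These tests check that a constant offset on a tile-slice index is folded
; into the immediate operand of the SME2 multi-vector fdot (indexed)
; instruction whenever the offset fits the addressing mode.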

; The tile-slice addressing mode supports an immediate in the range 0-7.
; This tests slice indices of 0, 1 and 7 (folded into the immediate) and
; 8 (not folded).
define void @sme_tileslice_addrmode_zero_base_plus_constant_offset(i32 %slice, <vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, <vscale x 8 x half> %4) "aarch64_pstate_sm_enabled" {
; CHECK-LABEL: sme_tileslice_addrmode_zero_base_plus_constant_offset:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, wzr
; CHECK-NEXT:    mov w9, #8 // =0x8
; CHECK-NEXT:    fdot za.s[w8, 0, vgx4], { z0.h - z3.h }, z4.h[0]
; CHECK-NEXT:    fdot za.s[w8, 1, vgx4], { z0.h - z3.h }, z4.h[0]
; CHECK-NEXT:    fdot za.s[w8, 7, vgx4], { z0.h - z3.h }, z4.h[0]
; CHECK-NEXT:    fdot za.s[w9, 0, vgx4], { z0.h - z3.h }, z4.h[0]
; CHECK-NEXT:    ret
  tail call void @llvm.aarch64.sme.fdot.lane.za32.vg1x4.nxv8f16(i32 0, <vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, <vscale x 8 x half> %4, i32 0)
  tail call void @llvm.aarch64.sme.fdot.lane.za32.vg1x4.nxv8f16(i32 1, <vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, <vscale x 8 x half> %4, i32 0)
  tail call void @llvm.aarch64.sme.fdot.lane.za32.vg1x4.nxv8f16(i32 7, <vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, <vscale x 8 x half> %4, i32 0)
  tail call void @llvm.aarch64.sme.fdot.lane.za32.vg1x4.nxv8f16(i32 8, <vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, <vscale x 8 x half> %4, i32 0)
  ret void
}

; The tile-slice addressing mode supports an immediate in the range 0-7.
; This tests offsets of 0, 1 and 7 from a variable base (folded into the
; immediate) and 8 (not folded; a separate add is emitted).
define void @sme_tileslice_addrmode_base_plus_constant_offset(i32 %slice, <vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, <vscale x 8 x half> %4) "aarch64_pstate_sm_enabled" {
; CHECK-LABEL: sme_tileslice_addrmode_base_plus_constant_offset:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    add w9, w0, #8
; CHECK-NEXT:    fdot za.s[w8, 0, vgx4], { z0.h - z3.h }, z4.h[0]
; CHECK-NEXT:    fdot za.s[w8, 1, vgx4], { z0.h - z3.h }, z4.h[0]
; CHECK-NEXT:    fdot za.s[w8, 7, vgx4], { z0.h - z3.h }, z4.h[0]
; CHECK-NEXT:    fdot za.s[w9, 0, vgx4], { z0.h - z3.h }, z4.h[0]
; CHECK-NEXT:    ret
  %slice0 = add i32 %slice, 0
  tail call void @llvm.aarch64.sme.fdot.lane.za32.vg1x4.nxv8f16(i32 %slice0, <vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, <vscale x 8 x half> %4, i32 0)
  %slice1 = add i32 %slice, 1
  tail call void @llvm.aarch64.sme.fdot.lane.za32.vg1x4.nxv8f16(i32 %slice1, <vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, <vscale x 8 x half> %4, i32 0)
  %slice7 = add i32 %slice, 7
  tail call void @llvm.aarch64.sme.fdot.lane.za32.vg1x4.nxv8f16(i32 %slice7, <vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, <vscale x 8 x half> %4, i32 0)
  %slice8 = add i32 %slice, 8
  tail call void @llvm.aarch64.sme.fdot.lane.za32.vg1x4.nxv8f16(i32 %slice8, <vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, <vscale x 8 x half> %4, i32 0)
  ret void
}
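
; A tile-slice index with no constant offset should use an immediate of 0.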
define void @sme_tileslice_addrmode_base_plus_zero_offset(i32 %slice, <vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, <vscale x 8 x half> %4) "aarch64_pstate_sm_enabled" {
; CHECK-LABEL: sme_tileslice_addrmode_base_plus_zero_offset:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    fdot za.s[w8, 0, vgx4], { z0.h - z3.h }, z4.h[0]
; CHECK-NEXT:    ret
  tail call void @llvm.aarch64.sme.fdot.lane.za32.vg1x4.nxv8f16(i32 %slice, <vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, <vscale x 8 x half> %4, i32 0)
  ret void
}

declare void @llvm.aarch64.sme.fdot.lane.za32.vg1x4.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i32 immarg)