| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2 |
| ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme2 -force-streaming < %s | FileCheck %s |
| |
; Lookup-table expand: one source register is expanded into four result registers.
| |
; LUTI4 x4 with 16-bit integer elements: the first i32 operand (0) selects the
; ZT0 table register and the trailing i32 (1) is the immediate lane index.
define {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>} @luti4_i16(<vscale x 16 x i8> %x) {
; CHECK-LABEL: luti4_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    luti4 { z0.h - z3.h }, zt0, z0[1]
; CHECK-NEXT:    ret
  %res = call {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>} @llvm.aarch64.sme.luti4.lane.zt.x4.nxv8i16(i32 0, <vscale x 16 x i8> %x, i32 1)
  ret {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>} %res
}
| |
; LUTI4 x4 with 32-bit integer elements (ZT0, lane 1); expects .s destinations.
define {<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>} @luti4_i32(<vscale x 16 x i8> %x) {
; CHECK-LABEL: luti4_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    luti4 { z0.s - z3.s }, zt0, z0[1]
; CHECK-NEXT:    ret
  %res = call {<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>} @llvm.aarch64.sme.luti4.lane.zt.x4.nxv4i32(i32 0, <vscale x 16 x i8> %x, i32 1)
  ret {<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>} %res
}
| |
; LUTI4 x4 with bfloat16 elements (ZT0, lane 1); same .h encoding as i16/f16.
define {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>} @luti4_bf16(<vscale x 16 x i8> %x) {
; CHECK-LABEL: luti4_bf16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    luti4 { z0.h - z3.h }, zt0, z0[1]
; CHECK-NEXT:    ret
  %res = call {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>} @llvm.aarch64.sme.luti4.lane.zt.x4.nxv8bf16(i32 0, <vscale x 16 x i8> %x, i32 1)
  ret {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>} %res
}
| |
; LUTI4 x4 with half-precision float elements (ZT0, lane 1).
define {<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>} @luti4_f16(<vscale x 16 x i8> %x) {
; CHECK-LABEL: luti4_f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    luti4 { z0.h - z3.h }, zt0, z0[1]
; CHECK-NEXT:    ret
  %res = call {<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>} @llvm.aarch64.sme.luti4.lane.zt.x4.nxv8f16(i32 0, <vscale x 16 x i8> %x, i32 1)
  ret {<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>} %res
}
| |
; LUTI4 x4 with single-precision float elements (ZT0, lane 1); expects .s destinations.
define {<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>} @luti4_f32(<vscale x 16 x i8> %x) {
; CHECK-LABEL: luti4_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    luti4 { z0.s - z3.s }, zt0, z0[1]
; CHECK-NEXT:    ret
  %res = call {<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>} @llvm.aarch64.sme.luti4.lane.zt.x4.nxv4f32(i32 0, <vscale x 16 x i8> %x, i32 1)
  ret {<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>} %res
}
| |
; Tests that multiple identical luti4 intrinsics with ZT0 loads interspersed are not CSE'd.
; Two identical luti4 calls are separated by ldr.zt stores into ZT0; the CHECK
; block requires two distinct luti4 instructions (z4-z7 and z0-z3), proving the
; second call is not folded into the first despite identical operands.
define void @test_multiple_luti4_zt_i8(ptr %ptrA, ptr %ptrB, <vscale x 16 x i8> %x) {
; CHECK-LABEL: test_multiple_luti4_zt_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr zt0, [x0]
; CHECK-NEXT:    luti4 { z4.s - z7.s }, zt0, z0[1]
; CHECK-NEXT:    // fake_use: $z4 $z4_z5_z6_z7
; CHECK-NEXT:    ldr zt0, [x1]
; CHECK-NEXT:    luti4 { z0.s - z3.s }, zt0, z0[1]
; CHECK-NEXT:    // fake_use: $z0 $z0_z1_z2_z3
; CHECK-NEXT:    ret
  ; Load ZT0 from %ptrA before the first lookup.
  tail call void @llvm.aarch64.sme.ldr.zt(i32 0, ptr %ptrA)
  %res1 = call {<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>} @llvm.aarch64.sme.luti4.lane.zt.x4.nxv4f32(i32 0, <vscale x 16 x i8> %x, i32 1)
  ; Reload ZT0 from %ptrB: the intervening table write must keep the second,
  ; otherwise-identical, luti4 call from being merged with the first.
  tail call void @llvm.aarch64.sme.ldr.zt(i32 0, ptr %ptrB)
  %res2 = call {<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>} @llvm.aarch64.sme.luti4.lane.zt.x4.nxv4f32(i32 0, <vscale x 16 x i8> %x, i32 1)

  ; fake.use keeps both result tuples live so neither luti4 is dead-code eliminated.
  call void (...) @llvm.fake.use({<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>} %res1)
  call void (...) @llvm.fake.use({<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>} %res2)
  ret void
}
| |
; Intrinsic declarations. Common signature: (i32 ZT table selector,
; <vscale x 16 x i8> source vector, i32 immediate lane index) -> four-vector tuple.
declare {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>} @llvm.aarch64.sme.luti4.lane.zt.x4.nxv8i16(i32, <vscale x 16 x i8>, i32)
declare {<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>} @llvm.aarch64.sme.luti4.lane.zt.x4.nxv4i32(i32, <vscale x 16 x i8>, i32)
declare {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>} @llvm.aarch64.sme.luti4.lane.zt.x4.nxv8bf16(i32, <vscale x 16 x i8>, i32)
declare {<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>} @llvm.aarch64.sme.luti4.lane.zt.x4.nxv8f16(i32, <vscale x 16 x i8>, i32)
declare {<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>} @llvm.aarch64.sme.luti4.lane.zt.x4.nxv4f32(i32, <vscale x 16 x i8>, i32)