| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2 |
| ; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve2p1 < %s | FileCheck %s |
| ; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve,+sme2p1 < %s | FileCheck %s |
| ; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sme2p1 -force-streaming < %s | FileCheck %s |
| ; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sme,+sve2p1 -force-streaming < %s | FileCheck %s |
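;
; The tests below check that the @llvm.aarch64.sve.dup.laneq intrinsics are
; lowered to the SVE2.1/SME2.1 DUPQ instruction, which broadcasts the element
; at the given immediate index within each 128-bit segment of the source
; vector. The index must be a constant below the per-segment element count
; (16 for .b, 8 for .h, 4 for .s, 2 for .d); the integer tests use the
; maximum legal index for each element size. The four RUN lines exercise the
; SVE2p1 and SME2p1 features in both normal and streaming modes.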
| |
define <vscale x 16 x i8> @test_dupq_i8(<vscale x 16 x i8> %zn) {
| ; CHECK-LABEL: test_dupq_i8: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: dupq z0.b, z0.b[15] |
| ; CHECK-NEXT: ret |
| %res = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.laneq.nxv16i8(<vscale x 16 x i8> %zn, i32 15) |
| ret <vscale x 16 x i8> %res |
| } |
| |
define <vscale x 8 x i16> @test_dupq_i16(<vscale x 8 x i16> %zn) {
| ; CHECK-LABEL: test_dupq_i16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: dupq z0.h, z0.h[7] |
| ; CHECK-NEXT: ret |
| %res = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.laneq.nxv8i16(<vscale x 8 x i16> %zn, i32 7) |
| ret <vscale x 8 x i16> %res |
| } |
| |
define <vscale x 4 x i32> @test_dupq_i32(<vscale x 4 x i32> %zn) {
; CHECK-LABEL: test_dupq_i32:
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: dupq z0.s, z0.s[3] |
| ; CHECK-NEXT: ret |
| %res = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.laneq.nxv4i32(<vscale x 4 x i32> %zn, i32 3) |
| ret <vscale x 4 x i32> %res |
| } |
| |
define <vscale x 2 x i64> @test_dupq_i64(<vscale x 2 x i64> %zn) {
| ; CHECK-LABEL: test_dupq_i64: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: dupq z0.d, z0.d[1] |
| ; CHECK-NEXT: ret |
| %res = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.laneq.nxv2i64(<vscale x 2 x i64> %zn, i32 1) |
| ret <vscale x 2 x i64> %res |
| } |
| |
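; The floating-point and bfloat variants select the same DUPQ forms as the
; integer tests above, keyed by element size rather than element type; these
; use in-range indices below the maximum.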
| define <vscale x 8 x half> @test_dupq_f16(<vscale x 8 x half> %zn) { |
| ; CHECK-LABEL: test_dupq_f16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: dupq z0.h, z0.h[4] |
| ; CHECK-NEXT: ret |
| %res = call <vscale x 8 x half> @llvm.aarch64.sve.dup.laneq.nxv8f16(<vscale x 8 x half> %zn, i32 4) |
| ret <vscale x 8 x half> %res |
| } |
| |
| define <vscale x 4 x float> @test_dupq_f32(<vscale x 4 x float> %zn) { |
| ; CHECK-LABEL: test_dupq_f32: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: dupq z0.s, z0.s[2] |
| ; CHECK-NEXT: ret |
| %res = call <vscale x 4 x float> @llvm.aarch64.sve.dup.laneq.nxv4f32(<vscale x 4 x float> %zn, i32 2) |
| ret <vscale x 4 x float> %res |
| } |
| |
| define <vscale x 2 x double> @test_dupq_f64(<vscale x 2 x double> %zn) { |
| ; CHECK-LABEL: test_dupq_f64: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: dupq z0.d, z0.d[0] |
| ; CHECK-NEXT: ret |
| %res = call <vscale x 2 x double> @llvm.aarch64.sve.dup.laneq.nxv2f64(<vscale x 2 x double> %zn, i32 0) |
| ret <vscale x 2 x double> %res |
| } |
| |
| define <vscale x 8 x bfloat> @test_dupq_bf16(<vscale x 8 x bfloat> %zn) { |
| ; CHECK-LABEL: test_dupq_bf16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: dupq z0.h, z0.h[1] |
| ; CHECK-NEXT: ret |
| %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.dup.laneq.nxv8bf16(<vscale x 8 x bfloat> %zn, i32 1) |
| ret <vscale x 8 x bfloat> %res |
| } |
| |
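; Declarations of the dup.laneq intrinsic for each supported element type.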
| declare <vscale x 16 x i8> @llvm.aarch64.sve.dup.laneq.nxv16i8(<vscale x 16 x i8>, i32) |
| declare <vscale x 8 x i16> @llvm.aarch64.sve.dup.laneq.nxv8i16(<vscale x 8 x i16>, i32) |
| declare <vscale x 4 x i32> @llvm.aarch64.sve.dup.laneq.nxv4i32(<vscale x 4 x i32>, i32) |
| declare <vscale x 2 x i64> @llvm.aarch64.sve.dup.laneq.nxv2i64(<vscale x 2 x i64>, i32) |
| declare <vscale x 8 x half> @llvm.aarch64.sve.dup.laneq.nxv8f16(<vscale x 8 x half>, i32) |
| declare <vscale x 4 x float> @llvm.aarch64.sve.dup.laneq.nxv4f32(<vscale x 4 x float>, i32) |
| declare <vscale x 2 x double> @llvm.aarch64.sve.dup.laneq.nxv2f64(<vscale x 2 x double>, i32) |
| declare <vscale x 8 x bfloat> @llvm.aarch64.sve.dup.laneq.nxv8bf16(<vscale x 8 x bfloat>, i32) |