; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p2 < %s | FileCheck %s --check-prefixes=CHECK
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve,+sme2p2 < %s | FileCheck %s --check-prefixes=CHECK
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme2p2 -force-streaming < %s | FileCheck %s --check-prefixes=CHECK
;
; COMPACT
;
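; COMPACT packs the active elements of the source vector (those selected by
; the governing predicate) into the lowest-numbered elements of the result
; and zeroes the remaining elements. The .b and .h forms tested here are
; new in SVE2.2/SME2.2; the .s and .d forms exist in base SVE.
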
define <vscale x 16 x i8> @compact_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: compact_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    compact z0.b, p0, z0.b
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.compact.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @compact_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: compact_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    compact z0.h, p0, z0.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.compact.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @compact_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: compact_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    compact z0.s, p0, z0.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.compact.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @compact_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: compact_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    compact z0.d, p0, z0.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.compact.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a)
  ret <vscale x 2 x i64> %out
}

define <vscale x 8 x half> @compact_f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a) {
; CHECK-LABEL: compact_f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    compact z0.h, p0, z0.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.compact.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @compact_f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a) {
; CHECK-LABEL: compact_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    compact z0.s, p0, z0.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.compact.nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @compact_f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a) {
; CHECK-LABEL: compact_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    compact z0.d, p0, z0.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.compact.nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a)
  ret <vscale x 2 x double> %out
}

define <vscale x 8 x bfloat> @compact_bf16(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a) {
; CHECK-LABEL: compact_bf16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    compact z0.h, p0, z0.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x bfloat> @llvm.aarch64.sve.compact.nxv8bf16(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a)
  ret <vscale x 8 x bfloat> %out
}

declare <vscale x 16 x i8> @llvm.aarch64.sve.compact.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.compact.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.compact.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.compact.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>)
declare <vscale x 8 x half> @llvm.aarch64.sve.compact.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.compact.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.compact.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>)
declare <vscale x 8 x bfloat> @llvm.aarch64.sve.compact.nxv8bf16(<vscale x 8 x i1>, <vscale x 8 x bfloat>)