; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve < %s | FileCheck %s

;
; Masked Loads
;

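; The tests below check that the sign-extend of a masked load is folded into
; an SVE sign-extending predicated load (ld1sb/ld1sh/ld1sw). The data is held
; in containers matching the element count, e.g. nxv2i8 lives in 64-bit
; containers, so the combined operation becomes "ld1sb { z0.d }".
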
define <vscale x 2 x i64> @masked_sload_nxv2i8(<vscale x 2 x i8> *%a, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: masked_sload_nxv2i8:
; CHECK: // %bb.0:
; CHECK-NEXT: ld1sb { z0.d }, p0/z, [x0]
; CHECK-NEXT: ret
%load = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(<vscale x 2 x i8> *%a, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
%ext = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
ret <vscale x 2 x i64> %ext
}

define <vscale x 2 x i64> @masked_sload_nxv2i16(<vscale x 2 x i16> *%a, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: masked_sload_nxv2i16:
; CHECK: // %bb.0:
; CHECK-NEXT: ld1sh { z0.d }, p0/z, [x0]
; CHECK-NEXT: ret
%load = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(<vscale x 2 x i16> *%a, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
%ext = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
ret <vscale x 2 x i64> %ext
}

define <vscale x 2 x i64> @masked_sload_nxv2i32(<vscale x 2 x i32> *%a, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: masked_sload_nxv2i32:
; CHECK: // %bb.0:
; CHECK-NEXT: ld1sw { z0.d }, p0/z, [x0]
; CHECK-NEXT: ret
%load = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(<vscale x 2 x i32> *%a, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
%ext = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
ret <vscale x 2 x i64> %ext
}

define <vscale x 4 x i32> @masked_sload_nxv4i8(<vscale x 4 x i8> *%a, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: masked_sload_nxv4i8:
; CHECK: // %bb.0:
; CHECK-NEXT: ld1sb { z0.s }, p0/z, [x0]
; CHECK-NEXT: ret
%load = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(<vscale x 4 x i8> *%a, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i8> undef)
%ext = sext <vscale x 4 x i8> %load to <vscale x 4 x i32>
ret <vscale x 4 x i32> %ext
}

define <vscale x 4 x i32> @masked_sload_nxv4i16(<vscale x 4 x i16> *%a, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: masked_sload_nxv4i16:
; CHECK: // %bb.0:
; CHECK-NEXT: ld1sh { z0.s }, p0/z, [x0]
; CHECK-NEXT: ret
%load = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(<vscale x 4 x i16> *%a, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i16> undef)
%ext = sext <vscale x 4 x i16> %load to <vscale x 4 x i32>
ret <vscale x 4 x i32> %ext
}

define <vscale x 8 x i16> @masked_sload_nxv8i8(<vscale x 8 x i8> *%a, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: masked_sload_nxv8i8:
; CHECK: // %bb.0:
; CHECK-NEXT: ld1sb { z0.h }, p0/z, [x0]
; CHECK-NEXT: ret
%load = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8> *%a, i32 1, <vscale x 8 x i1> %mask, <vscale x 8 x i8> undef)
%ext = sext <vscale x 8 x i8> %load to <vscale x 8 x i16>
ret <vscale x 8 x i16> %ext
}

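; With a non-undef passthru the extension cannot be folded away completely: as
; the CHECK lines show, the active lanes are loaded sign-extended (ld1sw), the
; passthru lanes are sign-extended separately (sxtw), and the two are merged
; under the original predicate (mov z0.d, p0/m, z1.d).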
define <vscale x 2 x i64> @masked_sload_passthru(<vscale x 2 x i32> *%a, <vscale x 2 x i1> %mask, <vscale x 2 x i32> %passthru) {
; CHECK-LABEL: masked_sload_passthru:
; CHECK: // %bb.0:
; CHECK-NEXT: ld1sw { z1.d }, p0/z, [x0]
; CHECK-NEXT: ptrue p1.d
; CHECK-NEXT: sxtw z0.d, p1/m, z0.d
; CHECK-NEXT: mov z0.d, p0/m, z1.d
; CHECK-NEXT: ret
%load = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(<vscale x 2 x i32> *%a, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i32> %passthru)
%ext = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
ret <vscale x 2 x i64> %ext
}

; Return type requires splitting
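; (An nxv16i32 result does not fit in a single SVE register, so the i8 data is
; loaded whole with ld1b and then widened in two sunpklo/sunpkhi steps into
; z0-z3.)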
define <vscale x 16 x i32> @masked_sload_nxv16i8(<vscale x 16 x i8>* %a, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: masked_sload_nxv16i8:
; CHECK: // %bb.0:
; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0]
; CHECK-NEXT: sunpklo z1.h, z0.b
; CHECK-NEXT: sunpkhi z3.h, z0.b
; CHECK-NEXT: sunpklo z0.s, z1.h
; CHECK-NEXT: sunpkhi z1.s, z1.h
; CHECK-NEXT: sunpklo z2.s, z3.h
; CHECK-NEXT: sunpkhi z3.s, z3.h
; CHECK-NEXT: ret
%load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8(<vscale x 16 x i8>* %a, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
%ext = sext <vscale x 16 x i8> %load to <vscale x 16 x i32>
ret <vscale x 16 x i32> %ext
}

; Masked load requires promotion
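; (nxv4i8 is not a legal SVE type, so the masked load is promoted: it is done
; as a sign-extending byte load into 32-bit containers, then unpacked to
; 64-bit elements and converted to double with scvtf.)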
define <vscale x 4 x double> @masked_sload_4i8_4f32(<vscale x 4 x i8>* noalias %in, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: masked_sload_4i8_4f32:
; CHECK: // %bb.0:
; CHECK-NEXT: ld1sb { z0.s }, p0/z, [x0]
; CHECK-NEXT: ptrue p1.d
; CHECK-NEXT: sunpkhi z1.d, z0.s
; CHECK-NEXT: sunpklo z0.d, z0.s
; CHECK-NEXT: scvtf z0.d, p1/m, z0.d
; CHECK-NEXT: scvtf z1.d, p1/m, z1.d
; CHECK-NEXT: ret
%wide.load = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(<vscale x 4 x i8>* %in, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i8> undef)
%sext = sext <vscale x 4 x i8> %wide.load to <vscale x 4 x i64>
%res = sitofp <vscale x 4 x i64> %sext to <vscale x 4 x double>
ret <vscale x 4 x double> %res
}

declare <vscale x 2 x i8> @llvm.masked.load.nxv2i8(<vscale x 2 x i8>*, i32, <vscale x 2 x i1>, <vscale x 2 x i8>)
declare <vscale x 2 x i16> @llvm.masked.load.nxv2i16(<vscale x 2 x i16>*, i32, <vscale x 2 x i1>, <vscale x 2 x i16>)
declare <vscale x 2 x i32> @llvm.masked.load.nxv2i32(<vscale x 2 x i32>*, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
declare <vscale x 4 x i8> @llvm.masked.load.nxv4i8(<vscale x 4 x i8>*, i32, <vscale x 4 x i1>, <vscale x 4 x i8>)
declare <vscale x 4 x i16> @llvm.masked.load.nxv4i16(<vscale x 4 x i16>*, i32, <vscale x 4 x i1>, <vscale x 4 x i16>)
declare <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8>*, i32, <vscale x 8 x i1>, <vscale x 8 x i8>)
declare <vscale x 16 x i8> @llvm.masked.load.nxv16i8(<vscale x 16 x i8>*, i32, <vscale x 16 x i1>, <vscale x 16 x i8>)