; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple riscv64 -mattr=+f,+d,+v,+zfh,+zfbfmin,+zvfh,+zvfbfmin -verify-machineinstrs \
; RUN: < %s | FileCheck %s --check-prefixes=CHECK,NOVLDEP,ZVFH
; RUN: llc -mtriple riscv64 -mattr=+f,+d,+v,+zfh,+zfbfmin,+zvfhmin,+zvfbfmin -verify-machineinstrs \
; RUN: < %s | FileCheck %s --check-prefixes=CHECK,NOVLDEP,ZVFHMIN
; RUN: llc -mtriple riscv64 -mattr=+f,+d,+v,+zfh,+zfbfmin,+zvfh,+zvfbfmin,+vl-dependent-latency -verify-machineinstrs \
; RUN: < %s | FileCheck %s --check-prefixes=CHECK,VLDEP,ZVFH
; RUN: llc -mtriple riscv64 -mattr=+f,+d,+v,+zfh,+zfbfmin,+zvfhmin,+zvfbfmin,+vl-dependent-latency -verify-machineinstrs \
; RUN: < %s | FileCheck %s --check-prefixes=CHECK,VLDEP,ZVFHMIN
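
; Tests lowering of llvm.experimental.vp.splice on fixed-length vectors:
; positive, negative, and zero offsets, masked and unmasked, across integer
; and floating-point element types (including f16 and bf16). The NOVLDEP and
; VLDEP prefixes compare codegen without and with +vl-dependent-latency,
; where instruction latency is assumed to scale with the dynamic vl: the
; vslidedown only needs to produce the lanes that survive the following
; vslideup, so it gets its own smaller vsetvli at the cost of an extra vl
; toggle.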
define <2 x i64> @test_vp_splice_v2i64(<2 x i64> %va, <2 x i64> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; NOVLDEP-LABEL: test_vp_splice_v2i64:
; NOVLDEP: # %bb.0:
; NOVLDEP-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; NOVLDEP-NEXT: vslidedown.vi v8, v8, 1
; NOVLDEP-NEXT: addi a0, a0, -1
; NOVLDEP-NEXT: vslideup.vx v8, v9, a0
; NOVLDEP-NEXT: ret
;
; VLDEP-LABEL: test_vp_splice_v2i64:
; VLDEP: # %bb.0:
; VLDEP-NEXT: addi a0, a0, -1
; VLDEP-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; VLDEP-NEXT: vslidedown.vi v8, v8, 1
; VLDEP-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; VLDEP-NEXT: vslideup.vx v8, v9, a0
; VLDEP-NEXT: ret
%v = call <2 x i64> @llvm.experimental.vp.splice.v2i64(<2 x i64> %va, <2 x i64> %vb, i32 1, <2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
ret <2 x i64> %v
}

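; With a negative offset the splice point counts back from the end of the
; first operand's evla elements: the first operand is slid down by
; evla+offset (a0) and the second is slid up by -offset. Under
; +vl-dependent-latency only the first -offset lanes of the slidedown
; survive, so it runs at the immediate VL -offset.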
define <2 x i64> @test_vp_splice_v2i64_negative_offset(<2 x i64> %va, <2 x i64> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; NOVLDEP-LABEL: test_vp_splice_v2i64_negative_offset:
; NOVLDEP: # %bb.0:
; NOVLDEP-NEXT: addi a0, a0, -1
; NOVLDEP-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; NOVLDEP-NEXT: vslidedown.vx v8, v8, a0
; NOVLDEP-NEXT: vslideup.vi v8, v9, 1
; NOVLDEP-NEXT: ret
;
; VLDEP-LABEL: test_vp_splice_v2i64_negative_offset:
; VLDEP: # %bb.0:
; VLDEP-NEXT: addi a0, a0, -1
; VLDEP-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; VLDEP-NEXT: vslidedown.vx v8, v8, a0
; VLDEP-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; VLDEP-NEXT: vslideup.vi v8, v9, 1
; VLDEP-NEXT: ret
%v = call <2 x i64> @llvm.experimental.vp.splice.v2i64(<2 x i64> %va, <2 x i64> %vb, i32 -1, <2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
ret <2 x i64> %v
}

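; A zero offset keeps the first operand in place, so the splice folds to a
; single vslideup of the second operand to position evla; the codegen is the
; same with and without +vl-dependent-latency, hence the shared CHECK prefix.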
define <2 x i64> @test_vp_splice_v2i64_zero_offset(<2 x i64> %va, <2 x i64> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_v2i64_zero_offset:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
%v = call <2 x i64> @llvm.experimental.vp.splice.v2i64(<2 x i64> %va, <2 x i64> %vb, i32 0, <2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
ret <2 x i64> %v
}

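; Masked variants keep v0.t on both slides and switch the trailing vslideup
; to the mu policy so that masked-off lanes keep the slidedown result.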
define <2 x i64> @test_vp_splice_v2i64_masked(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; NOVLDEP-LABEL: test_vp_splice_v2i64_masked:
; NOVLDEP: # %bb.0:
; NOVLDEP-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; NOVLDEP-NEXT: vslidedown.vi v8, v8, 1, v0.t
; NOVLDEP-NEXT: addi a0, a0, -1
; NOVLDEP-NEXT: vsetvli zero, zero, e64, m1, ta, mu
; NOVLDEP-NEXT: vslideup.vx v8, v9, a0, v0.t
; NOVLDEP-NEXT: ret
;
; VLDEP-LABEL: test_vp_splice_v2i64_masked:
; VLDEP: # %bb.0:
; VLDEP-NEXT: addi a0, a0, -1
; VLDEP-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; VLDEP-NEXT: vslidedown.vi v8, v8, 1, v0.t
; VLDEP-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; VLDEP-NEXT: vslideup.vx v8, v9, a0, v0.t
; VLDEP-NEXT: ret
%v = call <2 x i64> @llvm.experimental.vp.splice.v2i64(<2 x i64> %va, <2 x i64> %vb, i32 1, <2 x i1> %mask, i32 %evla, i32 %evlb)
ret <2 x i64> %v
}

define <4 x i32> @test_vp_splice_v4i32(<4 x i32> %va, <4 x i32> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; NOVLDEP-LABEL: test_vp_splice_v4i32:
; NOVLDEP: # %bb.0:
; NOVLDEP-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; NOVLDEP-NEXT: vslidedown.vi v8, v8, 3
; NOVLDEP-NEXT: addi a0, a0, -3
; NOVLDEP-NEXT: vslideup.vx v8, v9, a0
; NOVLDEP-NEXT: ret
;
; VLDEP-LABEL: test_vp_splice_v4i32:
; VLDEP: # %bb.0:
; VLDEP-NEXT: addi a0, a0, -3
; VLDEP-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; VLDEP-NEXT: vslidedown.vi v8, v8, 3
; VLDEP-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; VLDEP-NEXT: vslideup.vx v8, v9, a0
; VLDEP-NEXT: ret
%v = call <4 x i32> @llvm.experimental.vp.splice.v4i32(<4 x i32> %va, <4 x i32> %vb, i32 3, <4 x i1> splat (i1 1), i32 %evla, i32 %evlb)
ret <4 x i32> %v
}

define <4 x i32> @test_vp_splice_v4i32_negative_offset(<4 x i32> %va, <4 x i32> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; NOVLDEP-LABEL: test_vp_splice_v4i32_negative_offset:
; NOVLDEP: # %bb.0:
; NOVLDEP-NEXT: addi a0, a0, -3
; NOVLDEP-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; NOVLDEP-NEXT: vslidedown.vx v8, v8, a0
; NOVLDEP-NEXT: vslideup.vi v8, v9, 3
; NOVLDEP-NEXT: ret
;
; VLDEP-LABEL: test_vp_splice_v4i32_negative_offset:
; VLDEP: # %bb.0:
; VLDEP-NEXT: addi a0, a0, -3
; VLDEP-NEXT: vsetivli zero, 3, e32, m1, ta, ma
; VLDEP-NEXT: vslidedown.vx v8, v8, a0
; VLDEP-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; VLDEP-NEXT: vslideup.vi v8, v9, 3
; VLDEP-NEXT: ret
%v = call <4 x i32> @llvm.experimental.vp.splice.v4i32(<4 x i32> %va, <4 x i32> %vb, i32 -3, <4 x i1> splat (i1 1), i32 %evla, i32 %evlb)
ret <4 x i32> %v
}

define <4 x i32> @test_vp_splice_v4i32_masked(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; NOVLDEP-LABEL: test_vp_splice_v4i32_masked:
; NOVLDEP: # %bb.0:
; NOVLDEP-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; NOVLDEP-NEXT: vslidedown.vi v8, v8, 3, v0.t
; NOVLDEP-NEXT: addi a0, a0, -3
; NOVLDEP-NEXT: vsetvli zero, zero, e32, m1, ta, mu
; NOVLDEP-NEXT: vslideup.vx v8, v9, a0, v0.t
; NOVLDEP-NEXT: ret
;
; VLDEP-LABEL: test_vp_splice_v4i32_masked:
; VLDEP: # %bb.0:
; VLDEP-NEXT: addi a0, a0, -3
; VLDEP-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; VLDEP-NEXT: vslidedown.vi v8, v8, 3, v0.t
; VLDEP-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; VLDEP-NEXT: vslideup.vx v8, v9, a0, v0.t
; VLDEP-NEXT: ret
%v = call <4 x i32> @llvm.experimental.vp.splice.v4i32(<4 x i32> %va, <4 x i32> %vb, i32 3, <4 x i1> %mask, i32 %evla, i32 %evlb)
ret <4 x i32> %v
}

define <8 x i16> @test_vp_splice_v8i16(<8 x i16> %va, <8 x i16> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; NOVLDEP-LABEL: test_vp_splice_v8i16:
; NOVLDEP: # %bb.0:
; NOVLDEP-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; NOVLDEP-NEXT: vslidedown.vi v8, v8, 5
; NOVLDEP-NEXT: addi a0, a0, -5
; NOVLDEP-NEXT: vslideup.vx v8, v9, a0
; NOVLDEP-NEXT: ret
;
; VLDEP-LABEL: test_vp_splice_v8i16:
; VLDEP: # %bb.0:
; VLDEP-NEXT: addi a0, a0, -5
; VLDEP-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; VLDEP-NEXT: vslidedown.vi v8, v8, 5
; VLDEP-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; VLDEP-NEXT: vslideup.vx v8, v9, a0
; VLDEP-NEXT: ret
%v = call <8 x i16> @llvm.experimental.vp.splice.v8i16(<8 x i16> %va, <8 x i16> %vb, i32 5, <8 x i1> splat (i1 1), i32 %evla, i32 %evlb)
ret <8 x i16> %v
}

define <8 x i16> @test_vp_splice_v8i16_negative_offset(<8 x i16> %va, <8 x i16> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; NOVLDEP-LABEL: test_vp_splice_v8i16_negative_offset:
; NOVLDEP: # %bb.0:
; NOVLDEP-NEXT: addi a0, a0, -5
; NOVLDEP-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; NOVLDEP-NEXT: vslidedown.vx v8, v8, a0
; NOVLDEP-NEXT: vslideup.vi v8, v9, 5
; NOVLDEP-NEXT: ret
;
; VLDEP-LABEL: test_vp_splice_v8i16_negative_offset:
; VLDEP: # %bb.0:
; VLDEP-NEXT: addi a0, a0, -5
; VLDEP-NEXT: vsetivli zero, 5, e16, m1, ta, ma
; VLDEP-NEXT: vslidedown.vx v8, v8, a0
; VLDEP-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; VLDEP-NEXT: vslideup.vi v8, v9, 5
; VLDEP-NEXT: ret
%v = call <8 x i16> @llvm.experimental.vp.splice.v8i16(<8 x i16> %va, <8 x i16> %vb, i32 -5, <8 x i1> splat (i1 1), i32 %evla, i32 %evlb)
ret <8 x i16> %v
}

define <8 x i16> @test_vp_splice_v8i16_masked(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; NOVLDEP-LABEL: test_vp_splice_v8i16_masked:
; NOVLDEP: # %bb.0:
; NOVLDEP-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; NOVLDEP-NEXT: vslidedown.vi v8, v8, 5, v0.t
; NOVLDEP-NEXT: addi a0, a0, -5
; NOVLDEP-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; NOVLDEP-NEXT: vslideup.vx v8, v9, a0, v0.t
; NOVLDEP-NEXT: ret
;
; VLDEP-LABEL: test_vp_splice_v8i16_masked:
; VLDEP: # %bb.0:
; VLDEP-NEXT: addi a0, a0, -5
; VLDEP-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; VLDEP-NEXT: vslidedown.vi v8, v8, 5, v0.t
; VLDEP-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; VLDEP-NEXT: vslideup.vx v8, v9, a0, v0.t
; VLDEP-NEXT: ret
%v = call <8 x i16> @llvm.experimental.vp.splice.v8i16(<8 x i16> %va, <8 x i16> %vb, i32 5, <8 x i1> %mask, i32 %evla, i32 %evlb)
ret <8 x i16> %v
}

define <16 x i8> @test_vp_splice_v16i8(<16 x i8> %va, <16 x i8> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; NOVLDEP-LABEL: test_vp_splice_v16i8:
; NOVLDEP: # %bb.0:
; NOVLDEP-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; NOVLDEP-NEXT: vslidedown.vi v8, v8, 5
; NOVLDEP-NEXT: addi a0, a0, -5
; NOVLDEP-NEXT: vslideup.vx v8, v9, a0
; NOVLDEP-NEXT: ret
;
; VLDEP-LABEL: test_vp_splice_v16i8:
; VLDEP: # %bb.0:
; VLDEP-NEXT: addi a0, a0, -5
; VLDEP-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; VLDEP-NEXT: vslidedown.vi v8, v8, 5
; VLDEP-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; VLDEP-NEXT: vslideup.vx v8, v9, a0
; VLDEP-NEXT: ret
%v = call <16 x i8> @llvm.experimental.vp.splice.v16i8(<16 x i8> %va, <16 x i8> %vb, i32 5, <16 x i1> splat (i1 1), i32 %evla, i32 %evlb)
ret <16 x i8> %v
}

define <16 x i8> @test_vp_splice_v16i8_negative_offset(<16 x i8> %va, <16 x i8> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; NOVLDEP-LABEL: test_vp_splice_v16i8_negative_offset:
; NOVLDEP: # %bb.0:
; NOVLDEP-NEXT: addi a0, a0, -5
; NOVLDEP-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; NOVLDEP-NEXT: vslidedown.vx v8, v8, a0
; NOVLDEP-NEXT: vslideup.vi v8, v9, 5
; NOVLDEP-NEXT: ret
;
; VLDEP-LABEL: test_vp_splice_v16i8_negative_offset:
; VLDEP: # %bb.0:
; VLDEP-NEXT: addi a0, a0, -5
; VLDEP-NEXT: vsetivli zero, 5, e8, m1, ta, ma
; VLDEP-NEXT: vslidedown.vx v8, v8, a0
; VLDEP-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; VLDEP-NEXT: vslideup.vi v8, v9, 5
; VLDEP-NEXT: ret
%v = call <16 x i8> @llvm.experimental.vp.splice.v16i8(<16 x i8> %va, <16 x i8> %vb, i32 -5, <16 x i1> splat (i1 1), i32 %evla, i32 %evlb)
ret <16 x i8> %v
}

define <16 x i8> @test_vp_splice_v16i8_masked(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; NOVLDEP-LABEL: test_vp_splice_v16i8_masked:
; NOVLDEP: # %bb.0:
; NOVLDEP-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; NOVLDEP-NEXT: vslidedown.vi v8, v8, 5, v0.t
; NOVLDEP-NEXT: addi a0, a0, -5
; NOVLDEP-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; NOVLDEP-NEXT: vslideup.vx v8, v9, a0, v0.t
; NOVLDEP-NEXT: ret
;
; VLDEP-LABEL: test_vp_splice_v16i8_masked:
; VLDEP: # %bb.0:
; VLDEP-NEXT: addi a0, a0, -5
; VLDEP-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; VLDEP-NEXT: vslidedown.vi v8, v8, 5, v0.t
; VLDEP-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; VLDEP-NEXT: vslideup.vx v8, v9, a0, v0.t
; VLDEP-NEXT: ret
%v = call <16 x i8> @llvm.experimental.vp.splice.v16i8(<16 x i8> %va, <16 x i8> %vb, i32 5, <16 x i1> %mask, i32 %evla, i32 %evlb)
ret <16 x i8> %v
}

define <2 x double> @test_vp_splice_v2f64(<2 x double> %va, <2 x double> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; NOVLDEP-LABEL: test_vp_splice_v2f64:
; NOVLDEP: # %bb.0:
; NOVLDEP-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; NOVLDEP-NEXT: vslidedown.vi v8, v8, 1
; NOVLDEP-NEXT: addi a0, a0, -1
; NOVLDEP-NEXT: vslideup.vx v8, v9, a0
; NOVLDEP-NEXT: ret
;
; VLDEP-LABEL: test_vp_splice_v2f64:
; VLDEP: # %bb.0:
; VLDEP-NEXT: addi a0, a0, -1
; VLDEP-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; VLDEP-NEXT: vslidedown.vi v8, v8, 1
; VLDEP-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; VLDEP-NEXT: vslideup.vx v8, v9, a0
; VLDEP-NEXT: ret
%v = call <2 x double> @llvm.experimental.vp.splice.v2f64(<2 x double> %va, <2 x double> %vb, i32 1, <2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
ret <2 x double> %v
}

define <2 x double> @test_vp_splice_v2f64_negative_offset(<2 x double> %va, <2 x double> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; NOVLDEP-LABEL: test_vp_splice_v2f64_negative_offset:
; NOVLDEP: # %bb.0:
; NOVLDEP-NEXT: addi a0, a0, -1
; NOVLDEP-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; NOVLDEP-NEXT: vslidedown.vx v8, v8, a0
; NOVLDEP-NEXT: vslideup.vi v8, v9, 1
; NOVLDEP-NEXT: ret
;
; VLDEP-LABEL: test_vp_splice_v2f64_negative_offset:
; VLDEP: # %bb.0:
; VLDEP-NEXT: addi a0, a0, -1
; VLDEP-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; VLDEP-NEXT: vslidedown.vx v8, v8, a0
; VLDEP-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; VLDEP-NEXT: vslideup.vi v8, v9, 1
; VLDEP-NEXT: ret
%v = call <2 x double> @llvm.experimental.vp.splice.v2f64(<2 x double> %va, <2 x double> %vb, i32 -1, <2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
ret <2 x double> %v
}

define <2 x double> @test_vp_splice_v2f64_masked(<2 x double> %va, <2 x double> %vb, <2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; NOVLDEP-LABEL: test_vp_splice_v2f64_masked:
; NOVLDEP: # %bb.0:
; NOVLDEP-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; NOVLDEP-NEXT: vslidedown.vi v8, v8, 1, v0.t
; NOVLDEP-NEXT: addi a0, a0, -1
; NOVLDEP-NEXT: vsetvli zero, zero, e64, m1, ta, mu
; NOVLDEP-NEXT: vslideup.vx v8, v9, a0, v0.t
; NOVLDEP-NEXT: ret
;
; VLDEP-LABEL: test_vp_splice_v2f64_masked:
; VLDEP: # %bb.0:
; VLDEP-NEXT: addi a0, a0, -1
; VLDEP-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; VLDEP-NEXT: vslidedown.vi v8, v8, 1, v0.t
; VLDEP-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; VLDEP-NEXT: vslideup.vx v8, v9, a0, v0.t
; VLDEP-NEXT: ret
%v = call <2 x double> @llvm.experimental.vp.splice.v2f64(<2 x double> %va, <2 x double> %vb, i32 1, <2 x i1> %mask, i32 %evla, i32 %evlb)
ret <2 x double> %v
}

define <4 x float> @test_vp_splice_v4f32(<4 x float> %va, <4 x float> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; NOVLDEP-LABEL: test_vp_splice_v4f32:
; NOVLDEP: # %bb.0:
; NOVLDEP-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; NOVLDEP-NEXT: vslidedown.vi v8, v8, 3
; NOVLDEP-NEXT: addi a0, a0, -3
; NOVLDEP-NEXT: vslideup.vx v8, v9, a0
; NOVLDEP-NEXT: ret
;
; VLDEP-LABEL: test_vp_splice_v4f32:
; VLDEP: # %bb.0:
; VLDEP-NEXT: addi a0, a0, -3
; VLDEP-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; VLDEP-NEXT: vslidedown.vi v8, v8, 3
; VLDEP-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; VLDEP-NEXT: vslideup.vx v8, v9, a0
; VLDEP-NEXT: ret
%v = call <4 x float> @llvm.experimental.vp.splice.v4f32(<4 x float> %va, <4 x float> %vb, i32 3, <4 x i1> splat (i1 1), i32 %evla, i32 %evlb)
ret <4 x float> %v
}

define <4 x float> @test_vp_splice_v4f32_negative_offset(<4 x float> %va, <4 x float> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; NOVLDEP-LABEL: test_vp_splice_v4f32_negative_offset:
; NOVLDEP: # %bb.0:
; NOVLDEP-NEXT: addi a0, a0, -3
; NOVLDEP-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; NOVLDEP-NEXT: vslidedown.vx v8, v8, a0
; NOVLDEP-NEXT: vslideup.vi v8, v9, 3
; NOVLDEP-NEXT: ret
;
; VLDEP-LABEL: test_vp_splice_v4f32_negative_offset:
; VLDEP: # %bb.0:
; VLDEP-NEXT: addi a0, a0, -3
; VLDEP-NEXT: vsetivli zero, 3, e32, m1, ta, ma
; VLDEP-NEXT: vslidedown.vx v8, v8, a0
; VLDEP-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; VLDEP-NEXT: vslideup.vi v8, v9, 3
; VLDEP-NEXT: ret
%v = call <4 x float> @llvm.experimental.vp.splice.v4f32(<4 x float> %va, <4 x float> %vb, i32 -3, <4 x i1> splat (i1 1), i32 %evla, i32 %evlb)
ret <4 x float> %v
}

define <4 x float> @test_vp_splice_v4f32_masked(<4 x float> %va, <4 x float> %vb, <4 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; NOVLDEP-LABEL: test_vp_splice_v4f32_masked:
; NOVLDEP: # %bb.0:
; NOVLDEP-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; NOVLDEP-NEXT: vslidedown.vi v8, v8, 3, v0.t
; NOVLDEP-NEXT: addi a0, a0, -3
; NOVLDEP-NEXT: vsetvli zero, zero, e32, m1, ta, mu
; NOVLDEP-NEXT: vslideup.vx v8, v9, a0, v0.t
; NOVLDEP-NEXT: ret
;
; VLDEP-LABEL: test_vp_splice_v4f32_masked:
; VLDEP: # %bb.0:
; VLDEP-NEXT: addi a0, a0, -3
; VLDEP-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; VLDEP-NEXT: vslidedown.vi v8, v8, 3, v0.t
; VLDEP-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; VLDEP-NEXT: vslideup.vx v8, v9, a0, v0.t
; VLDEP-NEXT: ret
%v = call <4 x float> @llvm.experimental.vp.splice.v4f32(<4 x float> %va, <4 x float> %vb, i32 3, <4 x i1> %mask, i32 %evla, i32 %evlb)
ret <4 x float> %v
}

define <8 x half> @test_vp_splice_v8f16(<8 x half> %va, <8 x half> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; NOVLDEP-LABEL: test_vp_splice_v8f16:
; NOVLDEP: # %bb.0:
; NOVLDEP-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; NOVLDEP-NEXT: vslidedown.vi v8, v8, 5
; NOVLDEP-NEXT: addi a0, a0, -5
; NOVLDEP-NEXT: vslideup.vx v8, v9, a0
; NOVLDEP-NEXT: ret
;
; VLDEP-LABEL: test_vp_splice_v8f16:
; VLDEP: # %bb.0:
; VLDEP-NEXT: addi a0, a0, -5
; VLDEP-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; VLDEP-NEXT: vslidedown.vi v8, v8, 5
; VLDEP-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; VLDEP-NEXT: vslideup.vx v8, v9, a0
; VLDEP-NEXT: ret
%v = call <8 x half> @llvm.experimental.vp.splice.v8f16(<8 x half> %va, <8 x half> %vb, i32 5, <8 x i1> splat (i1 1), i32 %evla, i32 %evlb)
ret <8 x half> %v
}

define <8 x half> @test_vp_splice_v8f16_negative_offset(<8 x half> %va, <8 x half> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; NOVLDEP-LABEL: test_vp_splice_v8f16_negative_offset:
; NOVLDEP: # %bb.0:
; NOVLDEP-NEXT: addi a0, a0, -5
; NOVLDEP-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; NOVLDEP-NEXT: vslidedown.vx v8, v8, a0
; NOVLDEP-NEXT: vslideup.vi v8, v9, 5
; NOVLDEP-NEXT: ret
;
; VLDEP-LABEL: test_vp_splice_v8f16_negative_offset:
; VLDEP: # %bb.0:
; VLDEP-NEXT: addi a0, a0, -5
; VLDEP-NEXT: vsetivli zero, 5, e16, m1, ta, ma
; VLDEP-NEXT: vslidedown.vx v8, v8, a0
; VLDEP-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; VLDEP-NEXT: vslideup.vi v8, v9, 5
; VLDEP-NEXT: ret
%v = call <8 x half> @llvm.experimental.vp.splice.v8f16(<8 x half> %va, <8 x half> %vb, i32 -5, <8 x i1> splat (i1 1), i32 %evla, i32 %evlb)
ret <8 x half> %v
}

define <8 x half> @test_vp_splice_v8f16_masked(<8 x half> %va, <8 x half> %vb, <8 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; NOVLDEP-LABEL: test_vp_splice_v8f16_masked:
; NOVLDEP: # %bb.0:
; NOVLDEP-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; NOVLDEP-NEXT: vslidedown.vi v8, v8, 5, v0.t
; NOVLDEP-NEXT: addi a0, a0, -5
; NOVLDEP-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; NOVLDEP-NEXT: vslideup.vx v8, v9, a0, v0.t
; NOVLDEP-NEXT: ret
;
; VLDEP-LABEL: test_vp_splice_v8f16_masked:
; VLDEP: # %bb.0:
; VLDEP-NEXT: addi a0, a0, -5
; VLDEP-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; VLDEP-NEXT: vslidedown.vi v8, v8, 5, v0.t
; VLDEP-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; VLDEP-NEXT: vslideup.vx v8, v9, a0, v0.t
; VLDEP-NEXT: ret
%v = call <8 x half> @llvm.experimental.vp.splice.v8f16(<8 x half> %va, <8 x half> %vb, i32 5, <8 x i1> %mask, i32 %evla, i32 %evlb)
ret <8 x half> %v
}

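; When the first operand is an insertelement of a scalar into lane 0 and
; evla is 1, a zero-offset splice just prepends that scalar, so it folds to
; vslide1up.vx (vfslide1up.vf for FP scalars).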
define <4 x i32> @test_vp_splice_v4i32_with_firstelt(i32 %first, <4 x i32> %vb, <4 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_splice_v4i32_with_firstelt:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vslide1up.vx v9, v8, a0, v0.t
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
%va = insertelement <4 x i32> poison, i32 %first, i32 0
%v = call <4 x i32> @llvm.experimental.vp.splice.v4i32(<4 x i32> %va, <4 x i32> %vb, i32 0, <4 x i1> %mask, i32 1, i32 %evl)
ret <4 x i32> %v
}

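; A splat folds the same way: with evla = 1 only lane 0 of the first operand
; is read, and that lane holds the scalar.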
define <4 x i32> @test_vp_splice_v4i32_with_splat_firstelt(i32 %first, <4 x i32> %vb, <4 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_splice_v4i32_with_splat_firstelt:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vslide1up.vx v9, v8, a0, v0.t
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
%ins = insertelement <4 x i32> poison, i32 %first, i32 0
%splat = shufflevector <4 x i32> %ins, <4 x i32> poison, <4 x i32> zeroinitializer
%v = call <4 x i32> @llvm.experimental.vp.splice.v4i32(<4 x i32> %splat, <4 x i32> %vb, i32 0, <4 x i1> %mask, i32 1, i32 %evl)
ret <4 x i32> %v
}

define <4 x float> @test_vp_splice_v4f32_with_firstelt(float %first, <4 x float> %vb, <4 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_splice_v4f32_with_firstelt:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfslide1up.vf v9, v8, fa0, v0.t
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
%va = insertelement <4 x float> poison, float %first, i32 0
%v = call <4 x float> @llvm.experimental.vp.splice.v4f32(<4 x float> %va, <4 x float> %vb, i32 0, <4 x i1> %mask, i32 1, i32 %evl)
ret <4 x float> %v
}

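; vfslide1up.vf on f16 needs zvfh; with only zvfhmin the scalar is moved to
; a GPR with fmv.x.h and the integer vslide1up.vx is used instead.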
define <4 x half> @test_vp_splice_v4f16_with_firstelt(half %first, <4 x half> %vb, <4 x i1> %mask, i32 zeroext %evl) {
; ZVFH-LABEL: test_vp_splice_v4f16_with_firstelt:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT: vfslide1up.vf v9, v8, fa0, v0.t
; ZVFH-NEXT: vmv1r.v v8, v9
; ZVFH-NEXT: ret
;
; ZVFHMIN-LABEL: test_vp_splice_v4f16_with_firstelt:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vslide1up.vx v9, v8, a1, v0.t
; ZVFHMIN-NEXT: vmv1r.v v8, v9
; ZVFHMIN-NEXT: ret
%va = insertelement <4 x half> poison, half %first, i32 0
%v = call <4 x half> @llvm.experimental.vp.splice.v4f16(<4 x half> %va, <4 x half> %vb, i32 0, <4 x i1> %mask, i32 1, i32 %evl)
ret <4 x half> %v
}

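; bf16 splices need no dedicated FP slide support: vslidedown/vslideup are
; element-type agnostic, so with zvfbfmin the lowering matches the i16 tests
; at e16.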
define <8 x bfloat> @test_vp_splice_v8bf16(<8 x bfloat> %va, <8 x bfloat> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; NOVLDEP-LABEL: test_vp_splice_v8bf16:
; NOVLDEP: # %bb.0:
; NOVLDEP-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; NOVLDEP-NEXT: vslidedown.vi v8, v8, 5
; NOVLDEP-NEXT: addi a0, a0, -5
; NOVLDEP-NEXT: vslideup.vx v8, v9, a0
; NOVLDEP-NEXT: ret
;
; VLDEP-LABEL: test_vp_splice_v8bf16:
; VLDEP: # %bb.0:
; VLDEP-NEXT: addi a0, a0, -5
; VLDEP-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; VLDEP-NEXT: vslidedown.vi v8, v8, 5
; VLDEP-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; VLDEP-NEXT: vslideup.vx v8, v9, a0
; VLDEP-NEXT: ret
%v = call <8 x bfloat> @llvm.experimental.vp.splice.v8bf16(<8 x bfloat> %va, <8 x bfloat> %vb, i32 5, <8 x i1> splat (i1 1), i32 %evla, i32 %evlb)
ret <8 x bfloat> %v
}

define <8 x bfloat> @test_vp_splice_v8bf16_negative_offset(<8 x bfloat> %va, <8 x bfloat> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; NOVLDEP-LABEL: test_vp_splice_v8bf16_negative_offset:
; NOVLDEP: # %bb.0:
; NOVLDEP-NEXT: addi a0, a0, -5
; NOVLDEP-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; NOVLDEP-NEXT: vslidedown.vx v8, v8, a0
; NOVLDEP-NEXT: vslideup.vi v8, v9, 5
; NOVLDEP-NEXT: ret
;
; VLDEP-LABEL: test_vp_splice_v8bf16_negative_offset:
; VLDEP: # %bb.0:
; VLDEP-NEXT: addi a0, a0, -5
; VLDEP-NEXT: vsetivli zero, 5, e16, m1, ta, ma
; VLDEP-NEXT: vslidedown.vx v8, v8, a0
; VLDEP-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; VLDEP-NEXT: vslideup.vi v8, v9, 5
; VLDEP-NEXT: ret
%v = call <8 x bfloat> @llvm.experimental.vp.splice.v8bf16(<8 x bfloat> %va, <8 x bfloat> %vb, i32 -5, <8 x i1> splat (i1 1), i32 %evla, i32 %evlb)
ret <8 x bfloat> %v
}

define <8 x bfloat> @test_vp_splice_v8bf16_masked(<8 x bfloat> %va, <8 x bfloat> %vb, <8 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; NOVLDEP-LABEL: test_vp_splice_v8bf16_masked:
; NOVLDEP: # %bb.0:
; NOVLDEP-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; NOVLDEP-NEXT: vslidedown.vi v8, v8, 5, v0.t
; NOVLDEP-NEXT: addi a0, a0, -5
; NOVLDEP-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; NOVLDEP-NEXT: vslideup.vx v8, v9, a0, v0.t
; NOVLDEP-NEXT: ret
;
; VLDEP-LABEL: test_vp_splice_v8bf16_masked:
; VLDEP: # %bb.0:
; VLDEP-NEXT: addi a0, a0, -5
; VLDEP-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; VLDEP-NEXT: vslidedown.vi v8, v8, 5, v0.t
; VLDEP-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; VLDEP-NEXT: vslideup.vx v8, v9, a0, v0.t
; VLDEP-NEXT: ret
%v = call <8 x bfloat> @llvm.experimental.vp.splice.v8bf16(<8 x bfloat> %va, <8 x bfloat> %vb, i32 5, <8 x i1> %mask, i32 %evla, i32 %evlb)
ret <8 x bfloat> %v
}