; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+v -stop-after=finalize-isel -target-abi=lp64 < %s | FileCheck %s

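; vleff and vlseg2ff return the updated VL as an extra member of their result
; struct. Check that after finalize-isel each fault-only-first pseudo defines a
; GPR carrying that value and touches $vl only as a dead implicit-def.
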
declare { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>*, i64)
declare { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.mask.nxv8i8.i64(<vscale x 8 x i8>, <vscale x 8 x i8>*, <vscale x 8 x i1>, i64, i64 immarg)

declare {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg2ff.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, i8*, i64)
declare {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i1>, i64, i64)

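; With an undef passthru, the tail-agnostic PseudoVLE8FF_V_M1 is selected and
; the new VL it defines is returned in $x10.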
define i64 @test_vleff_nxv8i8(<vscale x 8 x i8> *%p, i64 %vl) {
; CHECK-LABEL: name: test_vleff_nxv8i8
; CHECK: bb.0.entry:
; CHECK-NEXT: liveins: $x10, $x11
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10
; CHECK-NEXT: [[PseudoVLE8FF_V_M1_:%[0-9]+]]:vr, [[PseudoVLE8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLE8FF_V_M1 [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def dead $vl
; CHECK-NEXT: $x10 = COPY [[PseudoVLE8FF_V_M1_1]]
; CHECK-NEXT: PseudoRET implicit $x10
entry:
  %0 = call { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8>* %p, i64 %vl)
  %1 = extractvalue { <vscale x 8 x i8>, i64 } %0, 1
  ret i64 %1
}

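; A non-undef merge operand switches selection to the tail-undisturbed
; PseudoVLE8FF_V_M1_TU, which takes the merge value as an extra operand.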
define i64 @test_vleff_nxv8i8_tu(<vscale x 8 x i8> %merge, <vscale x 8 x i8> *%p, i64 %vl) {
; CHECK-LABEL: name: test_vleff_nxv8i8_tu
; CHECK: bb.0.entry:
; CHECK-NEXT: liveins: $v8, $x10, $x11
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10
; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY $v8
; CHECK-NEXT: [[PseudoVLE8FF_V_M1_TU:%[0-9]+]]:vr, [[PseudoVLE8FF_V_M1_TU1:%[0-9]+]]:gpr = PseudoVLE8FF_V_M1_TU [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def dead $vl
; CHECK-NEXT: $x10 = COPY [[PseudoVLE8FF_V_M1_TU1]]
; CHECK-NEXT: PseudoRET implicit $x10
entry:
  %0 = call { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.nxv8i8(<vscale x 8 x i8> %merge, <vscale x 8 x i8>* %p, i64 %vl)
  %1 = extractvalue { <vscale x 8 x i8>, i64 } %0, 1
  ret i64 %1
}

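; The masked form copies the mask into $v0 and selects PseudoVLE8FF_V_M1_MASK;
; the passthru is constrained to vrnov0 so it cannot overlap the mask register.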
define i64 @test_vleff_nxv8i8_mask(<vscale x 8 x i8> %maskedoff, <vscale x 8 x i8> *%p, <vscale x 8 x i1> %m, i64 %vl) {
; CHECK-LABEL: name: test_vleff_nxv8i8_mask
; CHECK: bb.0.entry:
; CHECK-NEXT: liveins: $v8, $x10, $v0, $x11
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v0
; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10
; CHECK-NEXT: [[COPY3:%[0-9]+]]:vrnov0 = COPY $v8
; CHECK-NEXT: $v0 = COPY [[COPY1]]
; CHECK-NEXT: [[PseudoVLE8FF_V_M1_MASK:%[0-9]+]]:vrnov0, [[PseudoVLE8FF_V_M1_MASK1:%[0-9]+]]:gpr = PseudoVLE8FF_V_M1_MASK [[COPY3]], [[COPY2]], $v0, [[COPY]], 3 /* e8 */, 0, implicit-def dead $vl
; CHECK-NEXT: $x10 = COPY [[PseudoVLE8FF_V_M1_MASK1]]
; CHECK-NEXT: PseudoRET implicit $x10
entry:
  %0 = call { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.mask.nxv8i8.i64(<vscale x 8 x i8> %maskedoff, <vscale x 8 x i8> *%p, <vscale x 8 x i1> %m, i64 %vl, i64 0)
  %1 = extractvalue { <vscale x 8 x i8>, i64 } %0, 1
  ret i64 %1
}

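; Segment version: with undef passthrus, PseudoVLSEG2E8FF_V_M1 defines a
; vrn2m1 tuple plus the VL GPR (the third struct member).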
define i64 @test_vlseg2ff_nxv8i8(i8* %base, i64 %vl, i64* %outvl) {
; CHECK-LABEL: name: test_vlseg2ff_nxv8i8
; CHECK: bb.0.entry:
; CHECK-NEXT: liveins: $x10, $x11
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10
; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M1_:%[0-9]+]]:vrn2m1, [[PseudoVLSEG2E8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLSEG2E8FF_V_M1 [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def dead $vl
; CHECK-NEXT: $x10 = COPY [[PseudoVLSEG2E8FF_V_M1_1]]
; CHECK-NEXT: PseudoRET implicit $x10
entry:
  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg2ff.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, i8* %base, i64 %vl)
  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} %0, 2
  ret i64 %1
}

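; Non-undef passthru values are packed into a vrn2m1 tuple with REG_SEQUENCE
; before the tail-undisturbed _TU pseudo consumes them.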
define i64 @test_vlseg2ff_nxv8i8_tu(<vscale x 8 x i8> %val, i8* %base, i64 %vl, i64* %outvl) {
; CHECK-LABEL: name: test_vlseg2ff_nxv8i8_tu
; CHECK: bb.0.entry:
; CHECK-NEXT: liveins: $v8, $x10, $x11
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10
; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY $v8
; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vrn2m1 = REG_SEQUENCE [[COPY2]], %subreg.sub_vrm1_0, [[COPY2]], %subreg.sub_vrm1_1
; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M1_TU:%[0-9]+]]:vrn2m1, [[PseudoVLSEG2E8FF_V_M1_TU1:%[0-9]+]]:gpr = PseudoVLSEG2E8FF_V_M1_TU [[REG_SEQUENCE]], [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def dead $vl
; CHECK-NEXT: $x10 = COPY [[PseudoVLSEG2E8FF_V_M1_TU1]]
; CHECK-NEXT: PseudoRET implicit $x10
entry:
  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg2ff.nxv8i8(<vscale x 8 x i8> %val, <vscale x 8 x i8> %val, i8* %base, i64 %vl)
  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} %0, 2
  ret i64 %1
}

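; Masked segment form: the passthru tuple is constrained to vrn2m1nov0 and the
; mask is copied into $v0.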
define i64 @test_vlseg2ff_nxv8i8_mask(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i64 %vl, i64* %outvl) {
; CHECK-LABEL: name: test_vlseg2ff_nxv8i8_mask
; CHECK: bb.0.entry:
; CHECK-NEXT: liveins: $v8, $x10, $v0, $x11
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v0
; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10
; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY $v8
; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vrn2m1nov0 = REG_SEQUENCE [[COPY3]], %subreg.sub_vrm1_0, [[COPY3]], %subreg.sub_vrm1_1
; CHECK-NEXT: $v0 = COPY [[COPY1]]
; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M1_MASK:%[0-9]+]]:vrn2m1nov0, [[PseudoVLSEG2E8FF_V_M1_MASK1:%[0-9]+]]:gpr = PseudoVLSEG2E8FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY2]], $v0, [[COPY]], 3 /* e8 */, 0, implicit-def dead $vl
; CHECK-NEXT: $x10 = COPY [[PseudoVLSEG2E8FF_V_M1_MASK1]]
; CHECK-NEXT: PseudoRET implicit $x10
entry:
  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv8i8(<vscale x 8 x i8> %val, <vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i64 %vl, i64 0)
  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} %0, 2
  ret i64 %1
}