; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+v -stop-after=finalize-isel -target-abi=lp64 < %s | FileCheck %s

declare { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.nxv8i8(<vscale x 8 x i8>, ptr, i64)
declare { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.mask.nxv8i8.i64(<vscale x 8 x i8>, ptr, <vscale x 8 x i1>, i64, i64 immarg)

declare {target("riscv.vector.tuple", <vscale x 8 x i8>, 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, i64, i64)
declare {target("riscv.vector.tuple", <vscale x 8 x i8>, 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 8 x i1>, i64, i64, i64)

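; Unmasked vleff with an undef passthru: the fault-only-first load pseudo takes
; $noreg as its passthru operand and, besides the vector result, defines a
; scalar GPR holding the updated VL, which is returned in $x10.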
define i64 @test_vleff_nxv8i8(ptr %p, i64 %vl) {
  ; CHECK-LABEL: name: test_vleff_nxv8i8
  ; CHECK: bb.0.entry:
  ; CHECK-NEXT: liveins: $x10, $x11
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
  ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10
  ; CHECK-NEXT: [[PseudoVLE8FF_V_M1_:%[0-9]+]]:vr, [[PseudoVLE8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLE8FF_V_M1 $noreg, [[COPY1]], [[COPY]], 3 /* e8 */, 2 /* tu, ma */, implicit-def dead $vl :: (load unknown-size from %ir.p, align 1)
  ; CHECK-NEXT: $x10 = COPY [[PseudoVLE8FF_V_M1_1]]
  ; CHECK-NEXT: PseudoRET implicit $x10
entry:
  %0 = call { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.nxv8i8(<vscale x 8 x i8> undef, ptr %p, i64 %vl)
  %1 = extractvalue { <vscale x 8 x i8>, i64 } %0, 1
  ret i64 %1
}

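; Same as above, but with a real passthru: the value copied from $v8 is passed
; as the passthru operand of the fault-only-first load pseudo instead of $noreg.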
define i64 @test_vleff_nxv8i8_tu(<vscale x 8 x i8> %passthru, ptr %p, i64 %vl) {
  ; CHECK-LABEL: name: test_vleff_nxv8i8_tu
  ; CHECK: bb.0.entry:
  ; CHECK-NEXT: liveins: $v8, $x10, $x11
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
  ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10
  ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY $v8
  ; CHECK-NEXT: [[PseudoVLE8FF_V_M1_:%[0-9]+]]:vr, [[PseudoVLE8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLE8FF_V_M1 [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, 2 /* tu, ma */, implicit-def dead $vl :: (load unknown-size from %ir.p, align 1)
  ; CHECK-NEXT: $x10 = COPY [[PseudoVLE8FF_V_M1_1]]
  ; CHECK-NEXT: PseudoRET implicit $x10
entry:
  %0 = call { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.nxv8i8(<vscale x 8 x i8> %passthru, ptr %p, i64 %vl)
  %1 = extractvalue { <vscale x 8 x i8>, i64 } %0, 1
  ret i64 %1
}

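; Masked vleff: the mask copied from $v0 is moved into a vmv0 virtual register
; and fed to the masked pseudo, which still defines the scalar updated-VL
; result that is returned in $x10.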
define i64 @test_vleff_nxv8i8_mask(<vscale x 8 x i8> %maskedoff, ptr %p, <vscale x 8 x i1> %m, i64 %vl) {
  ; CHECK-LABEL: name: test_vleff_nxv8i8_mask
  ; CHECK: bb.0.entry:
  ; CHECK-NEXT: liveins: $v8, $x10, $v0, $x11
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
  ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v0
  ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10
  ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vrnov0 = COPY $v8
  ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vmv0 = COPY [[COPY1]]
  ; CHECK-NEXT: [[PseudoVLE8FF_V_M1_MASK:%[0-9]+]]:vrnov0, [[PseudoVLE8FF_V_M1_MASK1:%[0-9]+]]:gpr = PseudoVLE8FF_V_M1_MASK [[COPY3]], [[COPY2]], [[COPY4]], [[COPY]], 3 /* e8 */, 0 /* tu, mu */, implicit-def dead $vl :: (load unknown-size from %ir.p, align 1)
  ; CHECK-NEXT: $x10 = COPY [[PseudoVLE8FF_V_M1_MASK1]]
  ; CHECK-NEXT: PseudoRET implicit $x10
entry:
  %0 = call { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.mask.nxv8i8.i64(<vscale x 8 x i8> %maskedoff, ptr %p, <vscale x 8 x i1> %m, i64 %vl, i64 0)
  %1 = extractvalue { <vscale x 8 x i8>, i64 } %0, 1
  ret i64 %1
}

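; Segment fault-only-first load (vlseg2ff) returning a riscv.vector.tuple: the
; segment pseudo defines a vrn2m1 tuple plus the scalar updated VL, and only
; the scalar result is returned.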
define i64 @test_vlseg2ff_nxv8i8(ptr %base, i64 %vl, ptr %outvl) {
  ; CHECK-LABEL: name: test_vlseg2ff_nxv8i8
  ; CHECK: bb.0.entry:
  ; CHECK-NEXT: liveins: $x10, $x11
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
  ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10
  ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M1_:%[0-9]+]]:vrn2m1, [[PseudoVLSEG2E8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLSEG2E8FF_V_M1 $noreg, [[COPY1]], [[COPY]], 3 /* e8 */, 2 /* tu, ma */, implicit-def dead $vl :: (load unknown-size from %ir.base, align 1)
  ; CHECK-NEXT: $x10 = COPY [[PseudoVLSEG2E8FF_V_M1_1]]
  ; CHECK-NEXT: PseudoRET implicit $x10
entry:
  %0 = tail call {target("riscv.vector.tuple", <vscale x 8 x i8>, 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) undef, ptr %base, i64 %vl, i64 3)
  %1 = extractvalue {target("riscv.vector.tuple", <vscale x 8 x i8>, 2), i64} %0, 1
  ret i64 %1
}

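; vlseg2ff with a tuple passthru: the $v8_v9 pair is copied into a vrn2m1
; register and used as the passthru operand of the segment pseudo.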
define i64 @test_vlseg2ff_nxv8i8_tu(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %vl, ptr %outvl) {
  ; CHECK-LABEL: name: test_vlseg2ff_nxv8i8_tu
  ; CHECK: bb.0.entry:
  ; CHECK-NEXT: liveins: $v8_v9, $x10, $x11
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
  ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10
  ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrn2m1 = COPY $v8_v9
  ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M1_:%[0-9]+]]:vrn2m1, [[PseudoVLSEG2E8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLSEG2E8FF_V_M1 [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, 2 /* tu, ma */, implicit-def dead $vl :: (load unknown-size from %ir.base, align 1)
  ; CHECK-NEXT: $x10 = COPY [[PseudoVLSEG2E8FF_V_M1_1]]
  ; CHECK-NEXT: PseudoRET implicit $x10
entry:
  %0 = tail call {target("riscv.vector.tuple", <vscale x 8 x i8>, 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %vl, i64 3)
  %1 = extractvalue {target("riscv.vector.tuple", <vscale x 8 x i8>, 2), i64} %0, 1
  ret i64 %1
}

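; Masked vlseg2ff: the mask goes through a vmv0 copy, the tuple passthru is
; constrained to vrn2m1nov0, and the updated VL is returned in $x10.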
define i64 @test_vlseg2ff_nxv8i8_mask(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl, ptr %outvl) {
  ; CHECK-LABEL: name: test_vlseg2ff_nxv8i8_mask
  ; CHECK: bb.0.entry:
  ; CHECK-NEXT: liveins: $v8_v9, $x10, $v0, $x11
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
  ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v0
  ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10
  ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vrn2m1nov0 = COPY $v8_v9
  ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vmv0 = COPY [[COPY1]]
  ; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M1_MASK:%[0-9]+]]:vrn2m1nov0, [[PseudoVLSEG2E8FF_V_M1_MASK1:%[0-9]+]]:gpr = PseudoVLSEG2E8FF_V_M1_MASK [[COPY3]], [[COPY2]], [[COPY4]], [[COPY]], 3 /* e8 */, 0 /* tu, mu */, implicit-def dead $vl :: (load unknown-size from %ir.base, align 1)
  ; CHECK-NEXT: $x10 = COPY [[PseudoVLSEG2E8FF_V_M1_MASK1]]
  ; CHECK-NEXT: PseudoRET implicit $x10
entry:
  %0 = tail call {target("riscv.vector.tuple", <vscale x 8 x i8>, 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl, i64 0, i64 3)
  %1 = extractvalue {target("riscv.vector.tuple", <vscale x 8 x i8>, 2), i64} %0, 1
  ret i64 %1
}