; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc < %s -mtriple=riscv64 -mattr=+v -verify-machineinstrs | FileCheck %s
; Check that EVL tail folded loops from the loop vectorizer are able to have the
; vl of non-VP instructions reduced.
; EVL tail-folded loop as emitted by the loop vectorizer: q[i] = p[i] + 1 over
; 1024 i64 elements, VF = <vscale x 2 x i64>. The autogenerated CHECK lines
; pin down that the non-VP add (vadd.vi) executes under the EVL from the single
; vsetvli rather than a separate VLMAX vl, i.e. its vl has been reduced.
define void @evl_tail_folded(ptr %p, ptr %q) {
; CHECK-LABEL: evl_tail_folded:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li a2, 0
; CHECK-NEXT: csrr a3, vlenb
; CHECK-NEXT: srli a3, a3, 2
; CHECK-NEXT: addi a4, a3, 1023
; CHECK-NEXT: neg a5, a3
; CHECK-NEXT: and a4, a4, a5
; CHECK-NEXT: li a5, 1024
; CHECK-NEXT: .LBB0_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: sub a6, a5, a2
; CHECK-NEXT: slli a7, a2, 3
; CHECK-NEXT: vsetvli a6, a6, e64, m2, ta, ma
; CHECK-NEXT: add t0, a0, a7
; CHECK-NEXT: vle64.v v8, (t0)
; CHECK-NEXT: sub a4, a4, a3
; CHECK-NEXT: add a7, a1, a7
; CHECK-NEXT: vadd.vi v8, v8, 1
; CHECK-NEXT: vse64.v v8, (a7)
; CHECK-NEXT: add a2, a2, a6
; CHECK-NEXT: bnez a4, .LBB0_1
; CHECK-NEXT: # %bb.2: # %exit
; CHECK-NEXT: ret
entry:
; VF = vscale * 2 (element count of <vscale x 2 x i64>).
%0 = tail call i64 @llvm.vscale.i64()
%1 = shl i64 %0, 1
; %n.vec = 1024 rounded up to a multiple of VF:
; (1024 + VF - 1) - ((1024 + VF - 1) urem VF). The 1023 addend is 1024 - 1.
%n.rnd.up = add i64 %1, 1023
%n.mod.vf = urem i64 %n.rnd.up, %1
%n.vec = sub i64 %n.rnd.up, %n.mod.vf
; Second vscale query: the fixed per-iteration step for %index below.
%2 = tail call i64 @llvm.vscale.i64()
%3 = shl i64 %2, 1
br label %vector.body
vector.body:
; Dual induction variables: %index advances by a full VF each iteration and
; drives the exit test; %evl.based.iv accumulates the EVLs actually used and
; addresses the memory accesses.
%index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
%evl.based.iv = phi i64 [ 0, %entry ], [ %index.evl.next, %vector.body ]
; %avl = elements still to process; %4 = this iteration's EVL for element
; width selector 2, scalable = true (matching <vscale x 2 x i64>).
%avl = sub i64 1024, %evl.based.iv
%4 = tail call i32 @llvm.experimental.get.vector.length.i64(i64 %avl, i32 2, i1 true)
; Load p[%evl.based.iv ...] under an all-true mask, limited to %4 elements.
%5 = getelementptr i64, ptr %p, i64 %evl.based.iv
%vp.op.load = tail call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr %5, <vscale x 2 x i1> splat (i1 true), i32 %4)
; Non-VP add: the operation whose vl the backend is expected to reduce.
%6 = add <vscale x 2 x i64> %vp.op.load, splat (i64 1)
; Store the result to q at the same element offset, again bounded by EVL %4.
%7 = getelementptr i64, ptr %q, i64 %evl.based.iv
tail call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> %6, ptr %7, <vscale x 2 x i1> splat (i1 true), i32 %4)
; Advance the EVL-based IV by the EVL consumed, %index by a whole VF.
%8 = zext i32 %4 to i64
%index.evl.next = add i64 %evl.based.iv, %8
%index.next = add i64 %index, %3
; Exit once %index reaches the rounded-up trip count %n.vec.
%9 = icmp eq i64 %index.next, %n.vec
br i1 %9, label %exit, label %vector.body
exit:
ret void
}