; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN: -riscv-enable-vl-optimizer=false | FileCheck %s -check-prefixes=CHECK,NOVLOPT
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN: -riscv-enable-vl-optimizer=false | FileCheck %s -check-prefixes=CHECK,NOVLOPT
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v -riscv-enable-vl-optimizer \
; RUN: -verify-machineinstrs | FileCheck %s -check-prefixes=CHECK,VLOPT
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v -riscv-enable-vl-optimizer \
; RUN: -verify-machineinstrs | FileCheck %s -check-prefixes=CHECK,VLOPT
declare <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen)
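; With the VL optimizer enabled, the first vadd's VL of 5 can be reduced to
; the use's VL of 4, folding away the second vsetivli.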
define <vscale x 4 x i32> @different_imm_vl_with_ta(<vscale x 4 x i32> %passthru, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl1, iXLen %vl2) {
; NOVLOPT-LABEL: different_imm_vl_with_ta:
; NOVLOPT: # %bb.0:
; NOVLOPT-NEXT: vsetivli zero, 5, e32, m2, ta, ma
; NOVLOPT-NEXT: vadd.vv v8, v10, v12
; NOVLOPT-NEXT: vsetivli zero, 4, e32, m2, ta, ma
; NOVLOPT-NEXT: vadd.vv v8, v8, v10
; NOVLOPT-NEXT: ret
;
; VLOPT-LABEL: different_imm_vl_with_ta:
; VLOPT: # %bb.0:
; VLOPT-NEXT: vsetivli zero, 4, e32, m2, ta, ma
; VLOPT-NEXT: vadd.vv v8, v10, v12
; VLOPT-NEXT: vadd.vv v8, v8, v10
; VLOPT-NEXT: ret
%v = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen 5)
%w = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %v, <vscale x 4 x i32> %a, iXLen 4)
ret <vscale x 4 x i32> %w
}
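; Same with a VLMAX first VL: the VL optimizer reduces it to the use's VL of 4.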
define <vscale x 4 x i32> @vlmax_and_imm_vl_with_ta(<vscale x 4 x i32> %passthru, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl1, iXLen %vl2) {
; NOVLOPT-LABEL: vlmax_and_imm_vl_with_ta:
; NOVLOPT: # %bb.0:
; NOVLOPT-NEXT: vsetvli a0, zero, e32, m2, ta, ma
; NOVLOPT-NEXT: vadd.vv v8, v10, v12
; NOVLOPT-NEXT: vsetivli zero, 4, e32, m2, ta, ma
; NOVLOPT-NEXT: vadd.vv v8, v8, v10
; NOVLOPT-NEXT: ret
;
; VLOPT-LABEL: vlmax_and_imm_vl_with_ta:
; VLOPT: # %bb.0:
; VLOPT-NEXT: vsetivli zero, 4, e32, m2, ta, ma
; VLOPT-NEXT: vadd.vv v8, v10, v12
; VLOPT-NEXT: vadd.vv v8, v8, v10
; VLOPT-NEXT: ret
%v = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1)
%w = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %v, <vscale x 4 x i32> %a, iXLen 4)
ret <vscale x 4 x i32> %w
}
; Not beneficial to propagate VL since the VL is larger on the use side.
define <vscale x 4 x i32> @different_imm_vl_with_ta_larger_vl(<vscale x 4 x i32> %passthru, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl1, iXLen %vl2) {
; CHECK-LABEL: different_imm_vl_with_ta_larger_vl:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, ma
; CHECK-NEXT: vadd.vv v8, v10, v12
; CHECK-NEXT: vsetivli zero, 5, e32, m2, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v10
; CHECK-NEXT: ret
%v = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen 4)
%w = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %v, <vscale x 4 x i32> %a, iXLen 5)
ret <vscale x 4 x i32> %w
}
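; No VL reduction here: the use's VL (%vl1) is a runtime register value.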
define <vscale x 4 x i32> @different_imm_reg_vl_with_ta(<vscale x 4 x i32> %passthru, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl1, iXLen %vl2) {
; CHECK-LABEL: different_imm_reg_vl_with_ta:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, ma
; CHECK-NEXT: vadd.vv v8, v10, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v10
; CHECK-NEXT: ret
%v = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen 4)
%w = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %v, <vscale x 4 x i32> %a, iXLen %vl1)
ret <vscale x 4 x i32> %w
}
; Not beneficial to propagate VL since the VL is already 1.
define <vscale x 4 x i32> @different_imm_vl_with_ta_1(<vscale x 4 x i32> %passthru, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl1, iXLen %vl2) {
; CHECK-LABEL: different_imm_vl_with_ta_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; CHECK-NEXT: vadd.vv v8, v10, v12
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v10
; CHECK-NEXT: ret
%v = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen 1)
%w = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %v, <vscale x 4 x i32> %a, iXLen %vl1)
ret <vscale x 4 x i32> %w
}
; Propagate %vl2 to the last instruction since it may be smaller than %vl1.
; This is still safe even if %vl2 is larger than %vl1, because the rest of the
; vector is undefined.
define <vscale x 4 x i32> @different_vl_with_ta(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl1, iXLen %vl2) {
; CHECK-LABEL: different_vl_with_ta:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vadd.vv v10, v8, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vadd.vv v8, v10, v8
; CHECK-NEXT: ret
%v = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl1)
%w = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %v, <vscale x 4 x i32> %a, iXLen %vl2)
ret <vscale x 4 x i32> %w
}
; We can propagate VL to a tail-undisturbed policy, provided the result isn't
; used as a passthru (i.e. read past VL).
define <vscale x 4 x i32> @different_vl_with_tu(<vscale x 4 x i32> %passthru, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl1, iXLen %vl2) {
; CHECK-LABEL: different_vl_with_tu:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
; CHECK-NEXT: vmv2r.v v14, v10
; CHECK-NEXT: vadd.vv v14, v10, v12
; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma
; CHECK-NEXT: vadd.vv v8, v14, v10
; CHECK-NEXT: ret
%v = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl1)
%w = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> %passthru, <vscale x 4 x i32> %v, <vscale x 4 x i32> %a, iXLen %vl2)
ret <vscale x 4 x i32> %w
}
; We can propagate VL to a tail-undisturbed policy, provided the result isn't
; used as a passthru (i.e. read past VL).
define <vscale x 4 x i32> @different_imm_vl_with_tu(<vscale x 4 x i32> %passthru, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl1, iXLen %vl2) {
; NOVLOPT-LABEL: different_imm_vl_with_tu:
; NOVLOPT: # %bb.0:
; NOVLOPT-NEXT: vsetivli zero, 5, e32, m2, tu, ma
; NOVLOPT-NEXT: vmv2r.v v14, v10
; NOVLOPT-NEXT: vadd.vv v14, v10, v12
; NOVLOPT-NEXT: vsetivli zero, 4, e32, m2, tu, ma
; NOVLOPT-NEXT: vadd.vv v8, v14, v10
; NOVLOPT-NEXT: ret
;
; VLOPT-LABEL: different_imm_vl_with_tu:
; VLOPT: # %bb.0:
; VLOPT-NEXT: vsetivli zero, 4, e32, m2, tu, ma
; VLOPT-NEXT: vmv2r.v v14, v10
; VLOPT-NEXT: vadd.vv v14, v10, v12
; VLOPT-NEXT: vadd.vv v8, v14, v10
; VLOPT-NEXT: ret
%v = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen 5)
%w = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> %passthru, <vscale x 4 x i32> %v, <vscale x 4 x i32> %a, iXLen 4)
ret <vscale x 4 x i32> %w
}
; We can't reduce the VL as %v is used as a passthru, i.e. the elements past VL
; are demanded.
define <vscale x 4 x i32> @different_vl_as_passthru(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl1, iXLen %vl2) {
; CHECK-LABEL: different_vl_as_passthru:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vadd.vv v12, v8, v10
; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma
; CHECK-NEXT: vadd.vv v12, v8, v10
; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
%v = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl1)
%w = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> %v, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl2)
ret <vscale x 4 x i32> %w
}
; We can't reduce the VL as %v is used as a passthru, i.e. the elements past VL
; are demanded.
define <vscale x 4 x i32> @different_imm_vl_as_passthru(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl1, iXLen %vl2) {
; CHECK-LABEL: different_imm_vl_as_passthru:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 5, e32, m2, tu, ma
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vadd.vv v12, v8, v10
; CHECK-NEXT: vsetivli zero, 4, e32, m2, tu, ma
; CHECK-NEXT: vadd.vv v12, v8, v10
; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
%v = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen 5)
%w = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> %v, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen 4)
ret <vscale x 4 x i32> %w
}
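; The first vwmacc's result feeds the tied destination (passthru) operand of
; the second, so elements past VL are demanded and its VL can't be reduced.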
define <vscale x 4 x i32> @dont_optimize_tied_def(<vscale x 4 x i32> %a, <vscale x 4 x i16> %b, <vscale x 4 x i16> %c, iXLen %vl) {
; CHECK-LABEL: dont_optimize_tied_def:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m1, tu, ma
; CHECK-NEXT: vwmacc.vv v8, v10, v11
; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT: vwmacc.vv v8, v10, v11
; CHECK-NEXT: ret
%1 = call <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.nxv4i16(<vscale x 4 x i32> %a, <vscale x 4 x i16> %b, <vscale x 4 x i16> %c, iXLen -1, iXLen 0)
%2 = call <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.nxv4i16(<vscale x 4 x i32> %1, <vscale x 4 x i16> %b, <vscale x 4 x i16> %c, iXLen %vl, iXLen 0)
ret <vscale x 4 x i32> %2
}
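; The vzext is computed at VLMAX; with the VL optimizer its VL can be reduced
; to the VL demanded by the store it eventually feeds.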
define void @optimize_ternary_use(<vscale x 4 x i16> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c, ptr %p, iXLen %vl) {
; NOVLOPT-LABEL: optimize_ternary_use:
; NOVLOPT: # %bb.0:
; NOVLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
; NOVLOPT-NEXT: vzext.vf2 v14, v8
; NOVLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; NOVLOPT-NEXT: vmadd.vv v14, v10, v12
; NOVLOPT-NEXT: vse32.v v14, (a0)
; NOVLOPT-NEXT: ret
;
; VLOPT-LABEL: optimize_ternary_use:
; VLOPT: # %bb.0:
; VLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; VLOPT-NEXT: vzext.vf2 v14, v8
; VLOPT-NEXT: vmadd.vv v14, v10, v12
; VLOPT-NEXT: vse32.v v14, (a0)
; VLOPT-NEXT: ret
%1 = zext <vscale x 4 x i16> %a to <vscale x 4 x i32>
%2 = mul <vscale x 4 x i32> %b, %1
%3 = add <vscale x 4 x i32> %2, %c
call void @llvm.riscv.vse(<vscale x 4 x i32> %3, ptr %p, iXLen %vl)
ret void
}
; This function has a copy between two vrm2 virtual registers; make sure we
; can reduce the VL across it.
define void @fadd_fcmp_select_copy(<vscale x 4 x float> %v, <vscale x 4 x i1> %c, ptr %p, iXLen %vl) {
; NOVLOPT-LABEL: fadd_fcmp_select_copy:
; NOVLOPT: # %bb.0:
; NOVLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
; NOVLOPT-NEXT: vfadd.vv v8, v8, v8
; NOVLOPT-NEXT: fmv.w.x fa5, zero
; NOVLOPT-NEXT: vmflt.vf v10, v8, fa5
; NOVLOPT-NEXT: vmand.mm v10, v0, v10
; NOVLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; NOVLOPT-NEXT: vse32.v v8, (a0)
; NOVLOPT-NEXT: vsm.v v10, (a0)
; NOVLOPT-NEXT: ret
;
; VLOPT-LABEL: fadd_fcmp_select_copy:
; VLOPT: # %bb.0:
; VLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; VLOPT-NEXT: vfadd.vv v8, v8, v8
; VLOPT-NEXT: fmv.w.x fa5, zero
; VLOPT-NEXT: vmflt.vf v10, v8, fa5
; VLOPT-NEXT: vmand.mm v10, v0, v10
; VLOPT-NEXT: vse32.v v8, (a0)
; VLOPT-NEXT: vsm.v v10, (a0)
; VLOPT-NEXT: ret
%fadd = fadd <vscale x 4 x float> %v, %v
%fcmp = fcmp olt <vscale x 4 x float> %fadd, zeroinitializer
%select = select <vscale x 4 x i1> %c, <vscale x 4 x i1> %fcmp, <vscale x 4 x i1> zeroinitializer
call void @llvm.riscv.vse(<vscale x 4 x float> %fadd, ptr %p, iXLen %vl)
call void @llvm.riscv.vsm(<vscale x 4 x i1> %select, ptr %p, iXLen %vl)
ret void
}