; blob: 919f33a2b18c45736dbb86e766703bf3dbe73fff (web-scrape artifact; commented out so the file parses as LLVM IR)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+v,+m -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+Zve64x,+m -verify-machineinstrs < %s | FileCheck %s
declare i64 @llvm.vscale.i64()
; urem of a variable trip count by (vscale >> 3). The backend lowers this as a
; power-of-two remainder: vlenb >> 6 reconstructs the divisor, and the trip
; count is masked with (divisor - 1) — no udiv/mul is emitted.
define i64 @vscale_lshr(i64 %TC) {
; CHECK-LABEL: vscale_lshr:
; CHECK: # %bb.0:
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a1, a1, 6
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a0, a0, a1
; CHECK-NEXT: ret
  %vscale = call i64 @llvm.vscale.i64()
  %shifted = lshr i64 %vscale, 3
  %urem = urem i64 %TC, %shifted
  ret i64 %urem
}
; urem of a variable trip count by vscale itself. vscale = vlenb >> 3 on
; RV64 (VLEN/64), so the check lines show csrr vlenb + srli 3, then the
; remainder folds to an AND with (vscale - 1).
define i64 @vscale(i64 %TC) {
; CHECK-LABEL: vscale:
; CHECK: # %bb.0:
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a1, a1, 3
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a0, a0, a1
; CHECK-NEXT: ret
  %vscale = call i64 @llvm.vscale.i64()
  %urem = urem i64 %TC, %vscale
  ret i64 %urem
}
; urem by (vscale << 3). The <<3 cancels the vlenb>>3 that computes vscale,
; so the divisor is vlenb itself and no shift is emitted — just the
; AND-with-(divisor-1) mask directly on vlenb.
define i64 @vscale_shl(i64 %TC) {
; CHECK-LABEL: vscale_shl:
; CHECK: # %bb.0:
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a0, a0, a1
; CHECK-NEXT: ret
  %vscale = call i64 @llvm.vscale.i64()
  %shifted = shl i64 %vscale, 3
  %urem = urem i64 %TC, %shifted
  ret i64 %urem
}
; The vectorizer's round-down pattern: TC - (TC urem vscale). For a
; power-of-two divisor this is TC & -divisor, which the check lines show as
; neg + and instead of computing the remainder and subtracting.
define i64 @TC_minus_rem(i64 %TC) {
; CHECK-LABEL: TC_minus_rem:
; CHECK: # %bb.0:
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a1, a1, 3
; CHECK-NEXT: neg a1, a1
; CHECK-NEXT: and a0, a0, a1
; CHECK-NEXT: ret
  %vscale = call i64 @llvm.vscale.i64()
  %urem = urem i64 %TC, %vscale
  %VTC = sub i64 %TC, %urem
  ret i64 %VTC
}
; Round-down pattern with a shifted divisor: TC - (TC urem (vscale << 3)).
; As in @vscale_shl the <<3 cancels vscale's vlenb>>3, so this becomes
; TC & -vlenb — neg + and with no shift.
define i64 @TC_minus_rem_shl(i64 %TC) {
; CHECK-LABEL: TC_minus_rem_shl:
; CHECK: # %bb.0:
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: neg a1, a1
; CHECK-NEXT: and a0, a0, a1
; CHECK-NEXT: ret
  %vscale = call i64 @llvm.vscale.i64()
  %shifted = shl i64 %vscale, 3
  %urem = urem i64 %TC, %shifted
  %VTC = sub i64 %TC, %urem
  ret i64 %VTC
}
; Round-down pattern with a constant trip count: 1024 - (1024 urem vscale).
; Lowers to -vscale & 1024 (negw + andi); the constant fits an andi
; immediate so no materialization is needed.
define i64 @con1024_minus_rem() {
; CHECK-LABEL: con1024_minus_rem:
; CHECK: # %bb.0:
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 3
; CHECK-NEXT: negw a0, a0
; CHECK-NEXT: andi a0, a0, 1024
; CHECK-NEXT: ret
  %vscale = call i64 @llvm.vscale.i64()
  %urem = urem i64 1024, %vscale
  %VTC = sub i64 1024, %urem
  ret i64 %VTC
}
; Maximum VLEN=64k implies maximum vscale=1024, so 2048 is always a multiple
; of vscale and 2048 urem vscale is 0.
; TODO: This should fold to the constant 2048; currently the mask sequence
; is still emitted, with 2048 materialized via lui/addiw.
define i64 @con2048_minus_rem() {
; CHECK-LABEL: con2048_minus_rem:
; CHECK: # %bb.0:
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 3
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: lui a1, 1
; CHECK-NEXT: addiw a1, a1, -2048
; CHECK-NEXT: and a0, a0, a1
; CHECK-NEXT: ret
  %vscale = call i64 @llvm.vscale.i64()
  %urem = urem i64 2048, %vscale
  %VTC = sub i64 2048, %urem
  ret i64 %VTC
}