# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 6
# RUN: llc -mtriple=riscv64 -mcpu=spacemit-x60 -run-pass=machine-scheduler %s -o - | FileCheck %s
# The purpose of this test is to make sure COPYs from physical vector registers stay at the top
# of the function in order to reduce the physical registers' live ranges.
---
# Input MIR for the machine scheduler: two masked 16-bit vector gathers whose
# source operands arrive in live-in physical registers. The autogenerated
# CHECK lines pin the scheduled order — in particular that all COPYs out of
# physical registers land at the top of the block.
name: vector-copy-schedule
tracksRegLiveness: true
# MIR property: the body contains no PHI nodes.
noPhis: true
body: |
bb.0:
liveins: $x10, $v0, $v8m8, $v16m8
; CHECK-LABEL: name: vector-copy-schedule
; CHECK: liveins: $x10, $v0, $v8m8, $v16m8
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:vrm8nov0 = COPY $v16m8
; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrm8nov0 = COPY $v8m8
; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY $v0
; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10
; CHECK-NEXT: [[ADDI:%[0-9]+]]:gprnox0 = ADDI $x0, 64
; CHECK-NEXT: $v0 = COPY [[COPY2]]
; CHECK-NEXT: [[PseudoVCPOP_M_B2_:%[0-9]+]]:gprnox0 = PseudoVCPOP_M_B2 [[COPY2]], [[ADDI]], 0 /* e8 */
; CHECK-NEXT: early-clobber %5:vrm8 = PseudoVIOTA_M_M8 undef %5, [[COPY2]], [[ADDI]], 4 /* e16 */, 0 /* tu, mu */
; CHECK-NEXT: [[PseudoVLE16_V_M8_:%[0-9]+]]:vrm8 = PseudoVLE16_V_M8 undef [[PseudoVLE16_V_M8_]], [[COPY3]], [[PseudoVCPOP_M_B2_]], 4 /* e16 */, 2 /* tu, ma */
; CHECK-NEXT: early-clobber [[COPY1]]:vrm8nov0 = PseudoVRGATHER_VV_M8_E16_MASK [[COPY1]], [[PseudoVLE16_V_M8_]], %5, $v0, [[ADDI]], 4 /* e16 */, 1 /* ta, mu */
; CHECK-NEXT: [[PseudoVSLIDEDOWN_VI_M1_:%[0-9]+]]:vr = PseudoVSLIDEDOWN_VI_M1 undef [[PseudoVSLIDEDOWN_VI_M1_]], [[COPY2]], 8, 8, 3 /* e8 */, 3 /* ta, ma */
; CHECK-NEXT: [[PseudoVCPOP_M_B2_1:%[0-9]+]]:gprnox0 = PseudoVCPOP_M_B2 [[PseudoVSLIDEDOWN_VI_M1_]], [[ADDI]], 0 /* e8 */
; CHECK-NEXT: [[SLLI:%[0-9]+]]:gpr = SLLI [[PseudoVCPOP_M_B2_]], 1
; CHECK-NEXT: [[ADD:%[0-9]+]]:gpr = ADD [[COPY3]], [[SLLI]]
; CHECK-NEXT: [[PseudoVLE16_V_M8_1:%[0-9]+]]:vrm8 = PseudoVLE16_V_M8 undef [[PseudoVLE16_V_M8_1]], [[ADD]], [[PseudoVCPOP_M_B2_1]], 4 /* e16 */, 2 /* tu, ma */
; CHECK-NEXT: early-clobber %13:vrm8 = PseudoVIOTA_M_M8 undef %13, [[PseudoVSLIDEDOWN_VI_M1_]], [[ADDI]], 4 /* e16 */, 0 /* tu, mu */
; CHECK-NEXT: $v0 = COPY [[PseudoVSLIDEDOWN_VI_M1_]]
; CHECK-NEXT: early-clobber [[COPY]]:vrm8nov0 = PseudoVRGATHER_VV_M8_E16_MASK [[COPY]], [[PseudoVLE16_V_M8_1]], %13, $v0, [[ADDI]], 4 /* e16 */, 1 /* ta, mu */
; CHECK-NEXT: $v8m8 = COPY [[COPY1]]
; CHECK-NEXT: $v16m8 = COPY [[COPY]]
; CHECK-NEXT: PseudoRET implicit $v8m8, implicit $v16m8
; Copies out of the live-in physical registers. The scheduler is expected to
; keep all four at the top of the block (see the CHECK lines above) so the
; physical registers' live ranges stay short.
%23:vrm8nov0 = COPY $v16m8
%12:vrm8nov0 = COPY $v8m8
%6:vr = COPY $v0
%0:gpr = COPY $x10
; %8 = 64, used as the AVL operand of every whole-register vector op below.
%8:gprnox0 = ADDI $x0, 64
; First gather: viota/vcpop over mask %6, then an e16 unit-stride load from
; %0 of %10 (the set-bit count) elements, then a masked vrgather into %12.
early-clobber %9:vrm8 = PseudoVIOTA_M_M8 undef %9, %6, %8, 4 /* e16 */, 0 /* tu, mu */
%10:gprnox0 = PseudoVCPOP_M_B2 %6, %8, 0 /* e8 */
%11:vrm8 = PseudoVLE16_V_M8 undef %11, %0, %10, 4 /* e16 */, 2 /* tu, ma */
$v0 = COPY %6
early-clobber %12:vrm8nov0 = PseudoVRGATHER_VV_M8_E16_MASK %12, %11, %9, $v0, %8, 4 /* e16 */, 1 /* ta, mu */
; Second gather: slide the mask down by 8 and repeat, loading from the base
; pointer advanced by 2 * %10 bytes (%10 e16 elements consumed above).
%16:vr = PseudoVSLIDEDOWN_VI_M1 undef %16, %6, 8, 8, 3 /* e8 */, 3 /* ta, ma */
%18:gprnox0 = PseudoVCPOP_M_B2 %16, %8, 0 /* e8 */
%19:gpr = SLLI %10, 1
%20:gpr = ADD %0, %19
%21:vrm8 = PseudoVLE16_V_M8 undef %21, %20, %18, 4 /* e16 */, 2 /* tu, ma */
early-clobber %22:vrm8 = PseudoVIOTA_M_M8 undef %22, %16, %8, 4 /* e16 */, 0 /* tu, mu */
$v0 = COPY %16
early-clobber %23:vrm8nov0 = PseudoVRGATHER_VV_M8_E16_MASK %23, %21, %22, $v0, %8, 4 /* e16 */, 1 /* ta, mu */
; Both gather results are handed back in physical registers for the return.
$v8m8 = COPY %12
$v16m8 = COPY %23
PseudoRET implicit $v8m8, implicit $v16m8
...