; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s | FileCheck %s --check-prefix=RV32I
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s | FileCheck %s --check-prefix=RV64I
; This test case exercises the LocalStackSlotAllocation pass, which creates a base
; register for a frame index whose offset is out of range (for RISC-V, the
; load/store immediate is 12 bits, excluding vector loads/stores).
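; For reference, a sketch of the expected arithmetic (the mapping of loads to
; allocas is inferred from the check lines below): the frame is 100016 bytes
; (lui 24 = 24 * 4096 = 98304; 98304 + 1712 = 100016), so the two i8 slots sit
; at sp + 100008 and sp + 100012, far outside the signed 12-bit range
; [-2048, 2047]. The pass materializes a single base register at sp + 100008
; (98304 + 1704) and rewrites both loads to use the small offsets 0 and 4.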
define void @use_frame_base_reg() {
; RV32I-LABEL: use_frame_base_reg:
; RV32I: # %bb.0:
; RV32I-NEXT: lui a0, 24
; RV32I-NEXT: addi a0, a0, 1712
; RV32I-NEXT: sub sp, sp, a0
; RV32I-NEXT: .cfi_def_cfa_offset 100016
; RV32I-NEXT: lui a0, 24
; RV32I-NEXT: addi a0, a0, 1704
; RV32I-NEXT: add a0, sp, a0
; RV32I-NEXT: lbu a1, 4(a0)
; RV32I-NEXT: lbu a0, 0(a0)
; RV32I-NEXT: lui a0, 24
; RV32I-NEXT: addi a0, a0, 1712
; RV32I-NEXT: add sp, sp, a0
; RV32I-NEXT: ret
;
; RV64I-LABEL: use_frame_base_reg:
; RV64I: # %bb.0:
; RV64I-NEXT: lui a0, 24
; RV64I-NEXT: addiw a0, a0, 1712
; RV64I-NEXT: sub sp, sp, a0
; RV64I-NEXT: .cfi_def_cfa_offset 100016
; RV64I-NEXT: lui a0, 24
; RV64I-NEXT: addiw a0, a0, 1704
; RV64I-NEXT: add a0, sp, a0
; RV64I-NEXT: lbu a1, 4(a0)
; RV64I-NEXT: lbu a0, 0(a0)
; RV64I-NEXT: lui a0, 24
; RV64I-NEXT: addiw a0, a0, 1712
; RV64I-NEXT: add sp, sp, a0
; RV64I-NEXT: ret
%va = alloca i8, align 4
%va1 = alloca i8, align 4
  %large = alloca [100000 x i8]
%argp.cur = load volatile i8, ptr %va, align 4
%argp.next = load volatile i8, ptr %va1, align 4
ret void
}
; Test containing a load that already carries its own local offset. Make sure
; isFrameOffsetLegal takes that offset into account and does not create a
; virtual base register.
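; For reference (inferred from the check lines below): the first load already
; carries a 1600-byte GEP offset, placing it at sp + 102108 (25 * 4096 - 292),
; and the second sits at sp + 100008 (24 * 4096 + 1704). Because each access
; folds its own offset into a lui/add pair, no shared virtual base register is
; profitable, and none is created.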
define void @load_with_offset() {
; RV32I-LABEL: load_with_offset:
; RV32I: # %bb.0:
; RV32I-NEXT: lui a0, 25
; RV32I-NEXT: addi a0, a0, -1792
; RV32I-NEXT: sub sp, sp, a0
; RV32I-NEXT: .cfi_def_cfa_offset 100608
; RV32I-NEXT: lui a0, 25
; RV32I-NEXT: add a0, sp, a0
; RV32I-NEXT: lbu a0, -292(a0)
; RV32I-NEXT: lui a0, 24
; RV32I-NEXT: add a0, sp, a0
; RV32I-NEXT: lbu a0, 1704(a0)
; RV32I-NEXT: lui a0, 25
; RV32I-NEXT: addi a0, a0, -1792
; RV32I-NEXT: add sp, sp, a0
; RV32I-NEXT: ret
;
; RV64I-LABEL: load_with_offset:
; RV64I: # %bb.0:
; RV64I-NEXT: lui a0, 25
; RV64I-NEXT: addiw a0, a0, -1792
; RV64I-NEXT: sub sp, sp, a0
; RV64I-NEXT: .cfi_def_cfa_offset 100608
; RV64I-NEXT: lui a0, 25
; RV64I-NEXT: add a0, sp, a0
; RV64I-NEXT: lbu a0, -292(a0)
; RV64I-NEXT: lui a0, 24
; RV64I-NEXT: add a0, sp, a0
; RV64I-NEXT: lbu a0, 1704(a0)
; RV64I-NEXT: lui a0, 25
; RV64I-NEXT: addiw a0, a0, -1792
; RV64I-NEXT: add sp, sp, a0
; RV64I-NEXT: ret
%va = alloca [100 x i8], align 4
%va1 = alloca [500 x i8], align 4
%large = alloca [100000 x i8]
%va_gep = getelementptr [100 x i8], ptr %va, i64 16
  %va1_gep = getelementptr [500 x i8], ptr %va1, i64 0
%load = load volatile i8, ptr %va_gep, align 4
%load1 = load volatile i8, ptr %va1_gep, align 4
ret void
}