| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2 |
| ; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqccmp -verify-machineinstrs < %s \ |
| ; RUN: | FileCheck %s -check-prefixes=RV32IXQCCMP |
| ; RUN: llc -mtriple=riscv64 -mattr=+experimental-xqccmp -verify-machineinstrs < %s \ |
| ; RUN: | FileCheck %s -check-prefixes=RV64IXQCCMP |
| ; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqccmp -frame-pointer=all \ |
| ; RUN: -verify-machineinstrs < %s | FileCheck %s -check-prefixes=RV32IXQCCMP-FP |
| ; RUN: llc -mtriple=riscv64 -mattr=+experimental-xqccmp -frame-pointer=all \ |
| ; RUN: -verify-machineinstrs < %s | FileCheck %s -check-prefixes=RV64IXQCCMP-FP |
| ; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqccmp,+save-restore \ |
| ; RUN: -verify-machineinstrs < %s | FileCheck %s -check-prefixes=RV32IXQCCMP-SR |
| ; RUN: llc -mtriple=riscv64 -mattr=+experimental-xqccmp,+save-restore \ |
| ; RUN: -verify-machineinstrs < %s | FileCheck %s -check-prefixes=RV64IXQCCMP-SR |
| |
| declare void @test(ptr) |
| declare void @callee_void(ptr) |
| declare i32 @callee(ptr) |
| |
| ; A fixed 512-byte stack object: the frame is larger than qc.cm.push's max |
| ; adjustment, so an extra `addi sp, sp, -464` (with matching CFA updates) is |
| ; emitted around the push/pop pair. Returning 0 lets the pop fold into |
| ; qc.cm.popretz in every configuration. |
| define i32 @foo() { |
| ; RV32IXQCCMP-LABEL: foo: |
| ; RV32IXQCCMP: # %bb.0: |
| ; RV32IXQCCMP-NEXT: qc.cm.push {ra}, -64 |
| ; RV32IXQCCMP-NEXT: .cfi_def_cfa_offset 64 |
| ; RV32IXQCCMP-NEXT: .cfi_offset ra, -4 |
| ; RV32IXQCCMP-NEXT: addi sp, sp, -464 |
| ; RV32IXQCCMP-NEXT: .cfi_def_cfa_offset 528 |
| ; RV32IXQCCMP-NEXT: mv a0, sp |
| ; RV32IXQCCMP-NEXT: call test |
| ; RV32IXQCCMP-NEXT: addi sp, sp, 464 |
| ; RV32IXQCCMP-NEXT: .cfi_def_cfa_offset 64 |
| ; RV32IXQCCMP-NEXT: qc.cm.popretz {ra}, 64 |
| ; |
| ; RV64IXQCCMP-LABEL: foo: |
| ; RV64IXQCCMP: # %bb.0: |
| ; RV64IXQCCMP-NEXT: qc.cm.push {ra}, -64 |
| ; RV64IXQCCMP-NEXT: .cfi_def_cfa_offset 64 |
| ; RV64IXQCCMP-NEXT: .cfi_offset ra, -8 |
| ; RV64IXQCCMP-NEXT: addi sp, sp, -464 |
| ; RV64IXQCCMP-NEXT: .cfi_def_cfa_offset 528 |
| ; RV64IXQCCMP-NEXT: mv a0, sp |
| ; RV64IXQCCMP-NEXT: call test |
| ; RV64IXQCCMP-NEXT: addi sp, sp, 464 |
| ; RV64IXQCCMP-NEXT: .cfi_def_cfa_offset 64 |
| ; RV64IXQCCMP-NEXT: qc.cm.popretz {ra}, 64 |
| ; |
| ; RV32IXQCCMP-FP-LABEL: foo: |
| ; RV32IXQCCMP-FP: # %bb.0: |
| ; RV32IXQCCMP-FP-NEXT: qc.cm.pushfp {ra, s0}, -64 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 64 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset ra, -4 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset s0, -8 |
| ; RV32IXQCCMP-FP-NEXT: addi sp, sp, -464 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 528 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa s0, 0 |
| ; RV32IXQCCMP-FP-NEXT: addi a0, s0, -528 |
| ; RV32IXQCCMP-FP-NEXT: call test |
| ; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa sp, 528 |
| ; RV32IXQCCMP-FP-NEXT: addi sp, sp, 464 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 64 |
| ; RV32IXQCCMP-FP-NEXT: qc.cm.popretz {ra, s0}, 64 |
| ; |
| ; RV64IXQCCMP-FP-LABEL: foo: |
| ; RV64IXQCCMP-FP: # %bb.0: |
| ; RV64IXQCCMP-FP-NEXT: qc.cm.pushfp {ra, s0}, -64 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 64 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset ra, -8 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset s0, -16 |
| ; RV64IXQCCMP-FP-NEXT: addi sp, sp, -464 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 528 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa s0, 0 |
| ; RV64IXQCCMP-FP-NEXT: addi a0, s0, -528 |
| ; RV64IXQCCMP-FP-NEXT: call test |
| ; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa sp, 528 |
| ; RV64IXQCCMP-FP-NEXT: addi sp, sp, 464 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 64 |
| ; RV64IXQCCMP-FP-NEXT: qc.cm.popretz {ra, s0}, 64 |
| ; |
| ; RV32IXQCCMP-SR-LABEL: foo: |
| ; RV32IXQCCMP-SR: # %bb.0: |
| ; RV32IXQCCMP-SR-NEXT: qc.cm.push {ra}, -64 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 64 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset ra, -4 |
| ; RV32IXQCCMP-SR-NEXT: addi sp, sp, -464 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 528 |
| ; RV32IXQCCMP-SR-NEXT: mv a0, sp |
| ; RV32IXQCCMP-SR-NEXT: call test |
| ; RV32IXQCCMP-SR-NEXT: addi sp, sp, 464 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 64 |
| ; RV32IXQCCMP-SR-NEXT: qc.cm.popretz {ra}, 64 |
| ; |
| ; RV64IXQCCMP-SR-LABEL: foo: |
| ; RV64IXQCCMP-SR: # %bb.0: |
| ; RV64IXQCCMP-SR-NEXT: qc.cm.push {ra}, -64 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 64 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset ra, -8 |
| ; RV64IXQCCMP-SR-NEXT: addi sp, sp, -464 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 528 |
| ; RV64IXQCCMP-SR-NEXT: mv a0, sp |
| ; RV64IXQCCMP-SR-NEXT: call test |
| ; RV64IXQCCMP-SR-NEXT: addi sp, sp, 464 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 64 |
| ; RV64IXQCCMP-SR-NEXT: qc.cm.popretz {ra}, 64 |
|   %1 = alloca [512 x i8] |
|   %2 = getelementptr [512 x i8], ptr %1, i32 0, i32 0 |
|   call void @test(ptr %2) |
|   ret i32 0 |
| } |
| |
| ; Dynamic (runtime-sized) alloca: a frame pointer is required even without |
| ; -frame-pointer=all, so all configurations emit qc.cm.pushfp and restore sp |
| ; from s0 before the pop. Returning 0 still folds into qc.cm.popretz. |
| define i32 @pushpopret0(i32 signext %size) { |
| ; RV32IXQCCMP-LABEL: pushpopret0: |
| ; RV32IXQCCMP: # %bb.0: # %entry |
| ; RV32IXQCCMP-NEXT: qc.cm.pushfp {ra, s0}, -16 |
| ; RV32IXQCCMP-NEXT: .cfi_def_cfa_offset 16 |
| ; RV32IXQCCMP-NEXT: .cfi_offset ra, -4 |
| ; RV32IXQCCMP-NEXT: .cfi_offset s0, -8 |
| ; RV32IXQCCMP-NEXT: .cfi_def_cfa s0, 0 |
| ; RV32IXQCCMP-NEXT: addi a0, a0, 15 |
| ; RV32IXQCCMP-NEXT: andi a0, a0, -16 |
| ; RV32IXQCCMP-NEXT: sub a0, sp, a0 |
| ; RV32IXQCCMP-NEXT: mv sp, a0 |
| ; RV32IXQCCMP-NEXT: call callee_void |
| ; RV32IXQCCMP-NEXT: addi sp, s0, -16 |
| ; RV32IXQCCMP-NEXT: .cfi_def_cfa sp, 16 |
| ; RV32IXQCCMP-NEXT: qc.cm.popretz {ra, s0}, 16 |
| ; |
| ; RV64IXQCCMP-LABEL: pushpopret0: |
| ; RV64IXQCCMP: # %bb.0: # %entry |
| ; RV64IXQCCMP-NEXT: qc.cm.pushfp {ra, s0}, -16 |
| ; RV64IXQCCMP-NEXT: .cfi_def_cfa_offset 16 |
| ; RV64IXQCCMP-NEXT: .cfi_offset ra, -8 |
| ; RV64IXQCCMP-NEXT: .cfi_offset s0, -16 |
| ; RV64IXQCCMP-NEXT: .cfi_def_cfa s0, 0 |
| ; RV64IXQCCMP-NEXT: slli a0, a0, 32 |
| ; RV64IXQCCMP-NEXT: srli a0, a0, 32 |
| ; RV64IXQCCMP-NEXT: addi a0, a0, 15 |
| ; RV64IXQCCMP-NEXT: andi a0, a0, -16 |
| ; RV64IXQCCMP-NEXT: sub a0, sp, a0 |
| ; RV64IXQCCMP-NEXT: mv sp, a0 |
| ; RV64IXQCCMP-NEXT: call callee_void |
| ; RV64IXQCCMP-NEXT: addi sp, s0, -16 |
| ; RV64IXQCCMP-NEXT: .cfi_def_cfa sp, 16 |
| ; RV64IXQCCMP-NEXT: qc.cm.popretz {ra, s0}, 16 |
| ; |
| ; RV32IXQCCMP-FP-LABEL: pushpopret0: |
| ; RV32IXQCCMP-FP: # %bb.0: # %entry |
| ; RV32IXQCCMP-FP-NEXT: qc.cm.pushfp {ra, s0}, -16 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 16 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset ra, -4 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset s0, -8 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa s0, 0 |
| ; RV32IXQCCMP-FP-NEXT: addi a0, a0, 15 |
| ; RV32IXQCCMP-FP-NEXT: andi a0, a0, -16 |
| ; RV32IXQCCMP-FP-NEXT: sub a0, sp, a0 |
| ; RV32IXQCCMP-FP-NEXT: mv sp, a0 |
| ; RV32IXQCCMP-FP-NEXT: call callee_void |
| ; RV32IXQCCMP-FP-NEXT: addi sp, s0, -16 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa sp, 16 |
| ; RV32IXQCCMP-FP-NEXT: qc.cm.popretz {ra, s0}, 16 |
| ; |
| ; RV64IXQCCMP-FP-LABEL: pushpopret0: |
| ; RV64IXQCCMP-FP: # %bb.0: # %entry |
| ; RV64IXQCCMP-FP-NEXT: qc.cm.pushfp {ra, s0}, -16 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 16 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset ra, -8 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset s0, -16 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa s0, 0 |
| ; RV64IXQCCMP-FP-NEXT: slli a0, a0, 32 |
| ; RV64IXQCCMP-FP-NEXT: srli a0, a0, 32 |
| ; RV64IXQCCMP-FP-NEXT: addi a0, a0, 15 |
| ; RV64IXQCCMP-FP-NEXT: andi a0, a0, -16 |
| ; RV64IXQCCMP-FP-NEXT: sub a0, sp, a0 |
| ; RV64IXQCCMP-FP-NEXT: mv sp, a0 |
| ; RV64IXQCCMP-FP-NEXT: call callee_void |
| ; RV64IXQCCMP-FP-NEXT: addi sp, s0, -16 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa sp, 16 |
| ; RV64IXQCCMP-FP-NEXT: qc.cm.popretz {ra, s0}, 16 |
| ; |
| ; RV32IXQCCMP-SR-LABEL: pushpopret0: |
| ; RV32IXQCCMP-SR: # %bb.0: # %entry |
| ; RV32IXQCCMP-SR-NEXT: qc.cm.pushfp {ra, s0}, -16 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 16 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset ra, -4 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset s0, -8 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa s0, 0 |
| ; RV32IXQCCMP-SR-NEXT: addi a0, a0, 15 |
| ; RV32IXQCCMP-SR-NEXT: andi a0, a0, -16 |
| ; RV32IXQCCMP-SR-NEXT: sub a0, sp, a0 |
| ; RV32IXQCCMP-SR-NEXT: mv sp, a0 |
| ; RV32IXQCCMP-SR-NEXT: call callee_void |
| ; RV32IXQCCMP-SR-NEXT: addi sp, s0, -16 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa sp, 16 |
| ; RV32IXQCCMP-SR-NEXT: qc.cm.popretz {ra, s0}, 16 |
| ; |
| ; RV64IXQCCMP-SR-LABEL: pushpopret0: |
| ; RV64IXQCCMP-SR: # %bb.0: # %entry |
| ; RV64IXQCCMP-SR-NEXT: qc.cm.pushfp {ra, s0}, -16 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 16 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset ra, -8 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset s0, -16 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_def_cfa s0, 0 |
| ; RV64IXQCCMP-SR-NEXT: slli a0, a0, 32 |
| ; RV64IXQCCMP-SR-NEXT: srli a0, a0, 32 |
| ; RV64IXQCCMP-SR-NEXT: addi a0, a0, 15 |
| ; RV64IXQCCMP-SR-NEXT: andi a0, a0, -16 |
| ; RV64IXQCCMP-SR-NEXT: sub a0, sp, a0 |
| ; RV64IXQCCMP-SR-NEXT: mv sp, a0 |
| ; RV64IXQCCMP-SR-NEXT: call callee_void |
| ; RV64IXQCCMP-SR-NEXT: addi sp, s0, -16 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_def_cfa sp, 16 |
| ; RV64IXQCCMP-SR-NEXT: qc.cm.popretz {ra, s0}, 16 |
| entry: |
|   %0 = alloca i8, i32 %size, align 16 |
|   call void @callee_void(ptr nonnull %0) |
|   ret i32 0 |
| } |
| |
| ; Same dynamic-alloca shape as @pushpopret0 but returning 1: a non-zero |
| ; return value cannot use popretz, so the expected sequence is |
| ; `li a0, 1` followed by plain qc.cm.popret. |
| define i32 @pushpopret1(i32 signext %size) { |
| ; RV32IXQCCMP-LABEL: pushpopret1: |
| ; RV32IXQCCMP: # %bb.0: # %entry |
| ; RV32IXQCCMP-NEXT: qc.cm.pushfp {ra, s0}, -16 |
| ; RV32IXQCCMP-NEXT: .cfi_def_cfa_offset 16 |
| ; RV32IXQCCMP-NEXT: .cfi_offset ra, -4 |
| ; RV32IXQCCMP-NEXT: .cfi_offset s0, -8 |
| ; RV32IXQCCMP-NEXT: .cfi_def_cfa s0, 0 |
| ; RV32IXQCCMP-NEXT: addi a0, a0, 15 |
| ; RV32IXQCCMP-NEXT: andi a0, a0, -16 |
| ; RV32IXQCCMP-NEXT: sub a0, sp, a0 |
| ; RV32IXQCCMP-NEXT: mv sp, a0 |
| ; RV32IXQCCMP-NEXT: call callee_void |
| ; RV32IXQCCMP-NEXT: li a0, 1 |
| ; RV32IXQCCMP-NEXT: addi sp, s0, -16 |
| ; RV32IXQCCMP-NEXT: .cfi_def_cfa sp, 16 |
| ; RV32IXQCCMP-NEXT: qc.cm.popret {ra, s0}, 16 |
| ; |
| ; RV64IXQCCMP-LABEL: pushpopret1: |
| ; RV64IXQCCMP: # %bb.0: # %entry |
| ; RV64IXQCCMP-NEXT: qc.cm.pushfp {ra, s0}, -16 |
| ; RV64IXQCCMP-NEXT: .cfi_def_cfa_offset 16 |
| ; RV64IXQCCMP-NEXT: .cfi_offset ra, -8 |
| ; RV64IXQCCMP-NEXT: .cfi_offset s0, -16 |
| ; RV64IXQCCMP-NEXT: .cfi_def_cfa s0, 0 |
| ; RV64IXQCCMP-NEXT: slli a0, a0, 32 |
| ; RV64IXQCCMP-NEXT: srli a0, a0, 32 |
| ; RV64IXQCCMP-NEXT: addi a0, a0, 15 |
| ; RV64IXQCCMP-NEXT: andi a0, a0, -16 |
| ; RV64IXQCCMP-NEXT: sub a0, sp, a0 |
| ; RV64IXQCCMP-NEXT: mv sp, a0 |
| ; RV64IXQCCMP-NEXT: call callee_void |
| ; RV64IXQCCMP-NEXT: li a0, 1 |
| ; RV64IXQCCMP-NEXT: addi sp, s0, -16 |
| ; RV64IXQCCMP-NEXT: .cfi_def_cfa sp, 16 |
| ; RV64IXQCCMP-NEXT: qc.cm.popret {ra, s0}, 16 |
| ; |
| ; RV32IXQCCMP-FP-LABEL: pushpopret1: |
| ; RV32IXQCCMP-FP: # %bb.0: # %entry |
| ; RV32IXQCCMP-FP-NEXT: qc.cm.pushfp {ra, s0}, -16 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 16 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset ra, -4 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset s0, -8 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa s0, 0 |
| ; RV32IXQCCMP-FP-NEXT: addi a0, a0, 15 |
| ; RV32IXQCCMP-FP-NEXT: andi a0, a0, -16 |
| ; RV32IXQCCMP-FP-NEXT: sub a0, sp, a0 |
| ; RV32IXQCCMP-FP-NEXT: mv sp, a0 |
| ; RV32IXQCCMP-FP-NEXT: call callee_void |
| ; RV32IXQCCMP-FP-NEXT: li a0, 1 |
| ; RV32IXQCCMP-FP-NEXT: addi sp, s0, -16 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa sp, 16 |
| ; RV32IXQCCMP-FP-NEXT: qc.cm.popret {ra, s0}, 16 |
| ; |
| ; RV64IXQCCMP-FP-LABEL: pushpopret1: |
| ; RV64IXQCCMP-FP: # %bb.0: # %entry |
| ; RV64IXQCCMP-FP-NEXT: qc.cm.pushfp {ra, s0}, -16 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 16 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset ra, -8 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset s0, -16 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa s0, 0 |
| ; RV64IXQCCMP-FP-NEXT: slli a0, a0, 32 |
| ; RV64IXQCCMP-FP-NEXT: srli a0, a0, 32 |
| ; RV64IXQCCMP-FP-NEXT: addi a0, a0, 15 |
| ; RV64IXQCCMP-FP-NEXT: andi a0, a0, -16 |
| ; RV64IXQCCMP-FP-NEXT: sub a0, sp, a0 |
| ; RV64IXQCCMP-FP-NEXT: mv sp, a0 |
| ; RV64IXQCCMP-FP-NEXT: call callee_void |
| ; RV64IXQCCMP-FP-NEXT: li a0, 1 |
| ; RV64IXQCCMP-FP-NEXT: addi sp, s0, -16 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa sp, 16 |
| ; RV64IXQCCMP-FP-NEXT: qc.cm.popret {ra, s0}, 16 |
| ; |
| ; RV32IXQCCMP-SR-LABEL: pushpopret1: |
| ; RV32IXQCCMP-SR: # %bb.0: # %entry |
| ; RV32IXQCCMP-SR-NEXT: qc.cm.pushfp {ra, s0}, -16 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 16 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset ra, -4 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset s0, -8 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa s0, 0 |
| ; RV32IXQCCMP-SR-NEXT: addi a0, a0, 15 |
| ; RV32IXQCCMP-SR-NEXT: andi a0, a0, -16 |
| ; RV32IXQCCMP-SR-NEXT: sub a0, sp, a0 |
| ; RV32IXQCCMP-SR-NEXT: mv sp, a0 |
| ; RV32IXQCCMP-SR-NEXT: call callee_void |
| ; RV32IXQCCMP-SR-NEXT: li a0, 1 |
| ; RV32IXQCCMP-SR-NEXT: addi sp, s0, -16 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa sp, 16 |
| ; RV32IXQCCMP-SR-NEXT: qc.cm.popret {ra, s0}, 16 |
| ; |
| ; RV64IXQCCMP-SR-LABEL: pushpopret1: |
| ; RV64IXQCCMP-SR: # %bb.0: # %entry |
| ; RV64IXQCCMP-SR-NEXT: qc.cm.pushfp {ra, s0}, -16 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 16 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset ra, -8 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset s0, -16 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_def_cfa s0, 0 |
| ; RV64IXQCCMP-SR-NEXT: slli a0, a0, 32 |
| ; RV64IXQCCMP-SR-NEXT: srli a0, a0, 32 |
| ; RV64IXQCCMP-SR-NEXT: addi a0, a0, 15 |
| ; RV64IXQCCMP-SR-NEXT: andi a0, a0, -16 |
| ; RV64IXQCCMP-SR-NEXT: sub a0, sp, a0 |
| ; RV64IXQCCMP-SR-NEXT: mv sp, a0 |
| ; RV64IXQCCMP-SR-NEXT: call callee_void |
| ; RV64IXQCCMP-SR-NEXT: li a0, 1 |
| ; RV64IXQCCMP-SR-NEXT: addi sp, s0, -16 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_def_cfa sp, 16 |
| ; RV64IXQCCMP-SR-NEXT: qc.cm.popret {ra, s0}, 16 |
| entry: |
|   %0 = alloca i8, i32 %size, align 16 |
|   call void @callee_void(ptr nonnull %0) |
|   ret i32 1 |
| } |
| |
| ; As @pushpopret1 but returning -1: verifies a negative (non-zero) return |
| ; value is materialized with `li a0, -1` and paired with qc.cm.popret, |
| ; not popretz. |
| define i32 @pushpopretneg1(i32 signext %size) { |
| ; RV32IXQCCMP-LABEL: pushpopretneg1: |
| ; RV32IXQCCMP: # %bb.0: # %entry |
| ; RV32IXQCCMP-NEXT: qc.cm.pushfp {ra, s0}, -16 |
| ; RV32IXQCCMP-NEXT: .cfi_def_cfa_offset 16 |
| ; RV32IXQCCMP-NEXT: .cfi_offset ra, -4 |
| ; RV32IXQCCMP-NEXT: .cfi_offset s0, -8 |
| ; RV32IXQCCMP-NEXT: .cfi_def_cfa s0, 0 |
| ; RV32IXQCCMP-NEXT: addi a0, a0, 15 |
| ; RV32IXQCCMP-NEXT: andi a0, a0, -16 |
| ; RV32IXQCCMP-NEXT: sub a0, sp, a0 |
| ; RV32IXQCCMP-NEXT: mv sp, a0 |
| ; RV32IXQCCMP-NEXT: call callee_void |
| ; RV32IXQCCMP-NEXT: li a0, -1 |
| ; RV32IXQCCMP-NEXT: addi sp, s0, -16 |
| ; RV32IXQCCMP-NEXT: .cfi_def_cfa sp, 16 |
| ; RV32IXQCCMP-NEXT: qc.cm.popret {ra, s0}, 16 |
| ; |
| ; RV64IXQCCMP-LABEL: pushpopretneg1: |
| ; RV64IXQCCMP: # %bb.0: # %entry |
| ; RV64IXQCCMP-NEXT: qc.cm.pushfp {ra, s0}, -16 |
| ; RV64IXQCCMP-NEXT: .cfi_def_cfa_offset 16 |
| ; RV64IXQCCMP-NEXT: .cfi_offset ra, -8 |
| ; RV64IXQCCMP-NEXT: .cfi_offset s0, -16 |
| ; RV64IXQCCMP-NEXT: .cfi_def_cfa s0, 0 |
| ; RV64IXQCCMP-NEXT: slli a0, a0, 32 |
| ; RV64IXQCCMP-NEXT: srli a0, a0, 32 |
| ; RV64IXQCCMP-NEXT: addi a0, a0, 15 |
| ; RV64IXQCCMP-NEXT: andi a0, a0, -16 |
| ; RV64IXQCCMP-NEXT: sub a0, sp, a0 |
| ; RV64IXQCCMP-NEXT: mv sp, a0 |
| ; RV64IXQCCMP-NEXT: call callee_void |
| ; RV64IXQCCMP-NEXT: li a0, -1 |
| ; RV64IXQCCMP-NEXT: addi sp, s0, -16 |
| ; RV64IXQCCMP-NEXT: .cfi_def_cfa sp, 16 |
| ; RV64IXQCCMP-NEXT: qc.cm.popret {ra, s0}, 16 |
| ; |
| ; RV32IXQCCMP-FP-LABEL: pushpopretneg1: |
| ; RV32IXQCCMP-FP: # %bb.0: # %entry |
| ; RV32IXQCCMP-FP-NEXT: qc.cm.pushfp {ra, s0}, -16 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 16 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset ra, -4 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset s0, -8 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa s0, 0 |
| ; RV32IXQCCMP-FP-NEXT: addi a0, a0, 15 |
| ; RV32IXQCCMP-FP-NEXT: andi a0, a0, -16 |
| ; RV32IXQCCMP-FP-NEXT: sub a0, sp, a0 |
| ; RV32IXQCCMP-FP-NEXT: mv sp, a0 |
| ; RV32IXQCCMP-FP-NEXT: call callee_void |
| ; RV32IXQCCMP-FP-NEXT: li a0, -1 |
| ; RV32IXQCCMP-FP-NEXT: addi sp, s0, -16 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa sp, 16 |
| ; RV32IXQCCMP-FP-NEXT: qc.cm.popret {ra, s0}, 16 |
| ; |
| ; RV64IXQCCMP-FP-LABEL: pushpopretneg1: |
| ; RV64IXQCCMP-FP: # %bb.0: # %entry |
| ; RV64IXQCCMP-FP-NEXT: qc.cm.pushfp {ra, s0}, -16 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 16 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset ra, -8 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset s0, -16 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa s0, 0 |
| ; RV64IXQCCMP-FP-NEXT: slli a0, a0, 32 |
| ; RV64IXQCCMP-FP-NEXT: srli a0, a0, 32 |
| ; RV64IXQCCMP-FP-NEXT: addi a0, a0, 15 |
| ; RV64IXQCCMP-FP-NEXT: andi a0, a0, -16 |
| ; RV64IXQCCMP-FP-NEXT: sub a0, sp, a0 |
| ; RV64IXQCCMP-FP-NEXT: mv sp, a0 |
| ; RV64IXQCCMP-FP-NEXT: call callee_void |
| ; RV64IXQCCMP-FP-NEXT: li a0, -1 |
| ; RV64IXQCCMP-FP-NEXT: addi sp, s0, -16 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa sp, 16 |
| ; RV64IXQCCMP-FP-NEXT: qc.cm.popret {ra, s0}, 16 |
| ; |
| ; RV32IXQCCMP-SR-LABEL: pushpopretneg1: |
| ; RV32IXQCCMP-SR: # %bb.0: # %entry |
| ; RV32IXQCCMP-SR-NEXT: qc.cm.pushfp {ra, s0}, -16 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 16 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset ra, -4 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset s0, -8 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa s0, 0 |
| ; RV32IXQCCMP-SR-NEXT: addi a0, a0, 15 |
| ; RV32IXQCCMP-SR-NEXT: andi a0, a0, -16 |
| ; RV32IXQCCMP-SR-NEXT: sub a0, sp, a0 |
| ; RV32IXQCCMP-SR-NEXT: mv sp, a0 |
| ; RV32IXQCCMP-SR-NEXT: call callee_void |
| ; RV32IXQCCMP-SR-NEXT: li a0, -1 |
| ; RV32IXQCCMP-SR-NEXT: addi sp, s0, -16 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa sp, 16 |
| ; RV32IXQCCMP-SR-NEXT: qc.cm.popret {ra, s0}, 16 |
| ; |
| ; RV64IXQCCMP-SR-LABEL: pushpopretneg1: |
| ; RV64IXQCCMP-SR: # %bb.0: # %entry |
| ; RV64IXQCCMP-SR-NEXT: qc.cm.pushfp {ra, s0}, -16 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 16 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset ra, -8 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset s0, -16 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_def_cfa s0, 0 |
| ; RV64IXQCCMP-SR-NEXT: slli a0, a0, 32 |
| ; RV64IXQCCMP-SR-NEXT: srli a0, a0, 32 |
| ; RV64IXQCCMP-SR-NEXT: addi a0, a0, 15 |
| ; RV64IXQCCMP-SR-NEXT: andi a0, a0, -16 |
| ; RV64IXQCCMP-SR-NEXT: sub a0, sp, a0 |
| ; RV64IXQCCMP-SR-NEXT: mv sp, a0 |
| ; RV64IXQCCMP-SR-NEXT: call callee_void |
| ; RV64IXQCCMP-SR-NEXT: li a0, -1 |
| ; RV64IXQCCMP-SR-NEXT: addi sp, s0, -16 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_def_cfa sp, 16 |
| ; RV64IXQCCMP-SR-NEXT: qc.cm.popret {ra, s0}, 16 |
| entry: |
|   %0 = alloca i8, i32 %size, align 16 |
|   call void @callee_void(ptr nonnull %0) |
|   ret i32 -1 |
| } |
| |
| ; As @pushpopret1 but returning 2: another non-zero return value, expected |
| ; to produce `li a0, 2` + qc.cm.popret in every configuration. |
| define i32 @pushpopret2(i32 signext %size) { |
| ; RV32IXQCCMP-LABEL: pushpopret2: |
| ; RV32IXQCCMP: # %bb.0: # %entry |
| ; RV32IXQCCMP-NEXT: qc.cm.pushfp {ra, s0}, -16 |
| ; RV32IXQCCMP-NEXT: .cfi_def_cfa_offset 16 |
| ; RV32IXQCCMP-NEXT: .cfi_offset ra, -4 |
| ; RV32IXQCCMP-NEXT: .cfi_offset s0, -8 |
| ; RV32IXQCCMP-NEXT: .cfi_def_cfa s0, 0 |
| ; RV32IXQCCMP-NEXT: addi a0, a0, 15 |
| ; RV32IXQCCMP-NEXT: andi a0, a0, -16 |
| ; RV32IXQCCMP-NEXT: sub a0, sp, a0 |
| ; RV32IXQCCMP-NEXT: mv sp, a0 |
| ; RV32IXQCCMP-NEXT: call callee_void |
| ; RV32IXQCCMP-NEXT: li a0, 2 |
| ; RV32IXQCCMP-NEXT: addi sp, s0, -16 |
| ; RV32IXQCCMP-NEXT: .cfi_def_cfa sp, 16 |
| ; RV32IXQCCMP-NEXT: qc.cm.popret {ra, s0}, 16 |
| ; |
| ; RV64IXQCCMP-LABEL: pushpopret2: |
| ; RV64IXQCCMP: # %bb.0: # %entry |
| ; RV64IXQCCMP-NEXT: qc.cm.pushfp {ra, s0}, -16 |
| ; RV64IXQCCMP-NEXT: .cfi_def_cfa_offset 16 |
| ; RV64IXQCCMP-NEXT: .cfi_offset ra, -8 |
| ; RV64IXQCCMP-NEXT: .cfi_offset s0, -16 |
| ; RV64IXQCCMP-NEXT: .cfi_def_cfa s0, 0 |
| ; RV64IXQCCMP-NEXT: slli a0, a0, 32 |
| ; RV64IXQCCMP-NEXT: srli a0, a0, 32 |
| ; RV64IXQCCMP-NEXT: addi a0, a0, 15 |
| ; RV64IXQCCMP-NEXT: andi a0, a0, -16 |
| ; RV64IXQCCMP-NEXT: sub a0, sp, a0 |
| ; RV64IXQCCMP-NEXT: mv sp, a0 |
| ; RV64IXQCCMP-NEXT: call callee_void |
| ; RV64IXQCCMP-NEXT: li a0, 2 |
| ; RV64IXQCCMP-NEXT: addi sp, s0, -16 |
| ; RV64IXQCCMP-NEXT: .cfi_def_cfa sp, 16 |
| ; RV64IXQCCMP-NEXT: qc.cm.popret {ra, s0}, 16 |
| ; |
| ; RV32IXQCCMP-FP-LABEL: pushpopret2: |
| ; RV32IXQCCMP-FP: # %bb.0: # %entry |
| ; RV32IXQCCMP-FP-NEXT: qc.cm.pushfp {ra, s0}, -16 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 16 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset ra, -4 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset s0, -8 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa s0, 0 |
| ; RV32IXQCCMP-FP-NEXT: addi a0, a0, 15 |
| ; RV32IXQCCMP-FP-NEXT: andi a0, a0, -16 |
| ; RV32IXQCCMP-FP-NEXT: sub a0, sp, a0 |
| ; RV32IXQCCMP-FP-NEXT: mv sp, a0 |
| ; RV32IXQCCMP-FP-NEXT: call callee_void |
| ; RV32IXQCCMP-FP-NEXT: li a0, 2 |
| ; RV32IXQCCMP-FP-NEXT: addi sp, s0, -16 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa sp, 16 |
| ; RV32IXQCCMP-FP-NEXT: qc.cm.popret {ra, s0}, 16 |
| ; |
| ; RV64IXQCCMP-FP-LABEL: pushpopret2: |
| ; RV64IXQCCMP-FP: # %bb.0: # %entry |
| ; RV64IXQCCMP-FP-NEXT: qc.cm.pushfp {ra, s0}, -16 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 16 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset ra, -8 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset s0, -16 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa s0, 0 |
| ; RV64IXQCCMP-FP-NEXT: slli a0, a0, 32 |
| ; RV64IXQCCMP-FP-NEXT: srli a0, a0, 32 |
| ; RV64IXQCCMP-FP-NEXT: addi a0, a0, 15 |
| ; RV64IXQCCMP-FP-NEXT: andi a0, a0, -16 |
| ; RV64IXQCCMP-FP-NEXT: sub a0, sp, a0 |
| ; RV64IXQCCMP-FP-NEXT: mv sp, a0 |
| ; RV64IXQCCMP-FP-NEXT: call callee_void |
| ; RV64IXQCCMP-FP-NEXT: li a0, 2 |
| ; RV64IXQCCMP-FP-NEXT: addi sp, s0, -16 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa sp, 16 |
| ; RV64IXQCCMP-FP-NEXT: qc.cm.popret {ra, s0}, 16 |
| ; |
| ; RV32IXQCCMP-SR-LABEL: pushpopret2: |
| ; RV32IXQCCMP-SR: # %bb.0: # %entry |
| ; RV32IXQCCMP-SR-NEXT: qc.cm.pushfp {ra, s0}, -16 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 16 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset ra, -4 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset s0, -8 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa s0, 0 |
| ; RV32IXQCCMP-SR-NEXT: addi a0, a0, 15 |
| ; RV32IXQCCMP-SR-NEXT: andi a0, a0, -16 |
| ; RV32IXQCCMP-SR-NEXT: sub a0, sp, a0 |
| ; RV32IXQCCMP-SR-NEXT: mv sp, a0 |
| ; RV32IXQCCMP-SR-NEXT: call callee_void |
| ; RV32IXQCCMP-SR-NEXT: li a0, 2 |
| ; RV32IXQCCMP-SR-NEXT: addi sp, s0, -16 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa sp, 16 |
| ; RV32IXQCCMP-SR-NEXT: qc.cm.popret {ra, s0}, 16 |
| ; |
| ; RV64IXQCCMP-SR-LABEL: pushpopret2: |
| ; RV64IXQCCMP-SR: # %bb.0: # %entry |
| ; RV64IXQCCMP-SR-NEXT: qc.cm.pushfp {ra, s0}, -16 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 16 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset ra, -8 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset s0, -16 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_def_cfa s0, 0 |
| ; RV64IXQCCMP-SR-NEXT: slli a0, a0, 32 |
| ; RV64IXQCCMP-SR-NEXT: srli a0, a0, 32 |
| ; RV64IXQCCMP-SR-NEXT: addi a0, a0, 15 |
| ; RV64IXQCCMP-SR-NEXT: andi a0, a0, -16 |
| ; RV64IXQCCMP-SR-NEXT: sub a0, sp, a0 |
| ; RV64IXQCCMP-SR-NEXT: mv sp, a0 |
| ; RV64IXQCCMP-SR-NEXT: call callee_void |
| ; RV64IXQCCMP-SR-NEXT: li a0, 2 |
| ; RV64IXQCCMP-SR-NEXT: addi sp, s0, -16 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_def_cfa sp, 16 |
| ; RV64IXQCCMP-SR-NEXT: qc.cm.popret {ra, s0}, 16 |
| entry: |
|   %0 = alloca i8, i32 %size, align 16 |
|   call void @callee_void(ptr nonnull %0) |
|   ret i32 2 |
| } |
| |
| ; Tail call after a dynamic alloca: the epilogue cannot use popret/popretz |
| ; because control transfers via `tail callee` rather than a return. Expect a |
| ; plain qc.cm.pop followed by .cfi_restore for each popped register and |
| ; .cfi_def_cfa_offset 0 before the tail jump. |
| define dso_local i32 @tailcall(i32 signext %size) local_unnamed_addr #0 { |
| ; RV32IXQCCMP-LABEL: tailcall: |
| ; RV32IXQCCMP: # %bb.0: # %entry |
| ; RV32IXQCCMP-NEXT: qc.cm.pushfp {ra, s0}, -16 |
| ; RV32IXQCCMP-NEXT: .cfi_def_cfa_offset 16 |
| ; RV32IXQCCMP-NEXT: .cfi_offset ra, -4 |
| ; RV32IXQCCMP-NEXT: .cfi_offset s0, -8 |
| ; RV32IXQCCMP-NEXT: .cfi_def_cfa s0, 0 |
| ; RV32IXQCCMP-NEXT: addi a0, a0, 15 |
| ; RV32IXQCCMP-NEXT: andi a0, a0, -16 |
| ; RV32IXQCCMP-NEXT: sub a0, sp, a0 |
| ; RV32IXQCCMP-NEXT: mv sp, a0 |
| ; RV32IXQCCMP-NEXT: addi sp, s0, -16 |
| ; RV32IXQCCMP-NEXT: .cfi_def_cfa sp, 16 |
| ; RV32IXQCCMP-NEXT: qc.cm.pop {ra, s0}, 16 |
| ; RV32IXQCCMP-NEXT: .cfi_restore ra |
| ; RV32IXQCCMP-NEXT: .cfi_restore s0 |
| ; RV32IXQCCMP-NEXT: .cfi_def_cfa_offset 0 |
| ; RV32IXQCCMP-NEXT: tail callee |
| ; |
| ; RV64IXQCCMP-LABEL: tailcall: |
| ; RV64IXQCCMP: # %bb.0: # %entry |
| ; RV64IXQCCMP-NEXT: qc.cm.pushfp {ra, s0}, -16 |
| ; RV64IXQCCMP-NEXT: .cfi_def_cfa_offset 16 |
| ; RV64IXQCCMP-NEXT: .cfi_offset ra, -8 |
| ; RV64IXQCCMP-NEXT: .cfi_offset s0, -16 |
| ; RV64IXQCCMP-NEXT: .cfi_def_cfa s0, 0 |
| ; RV64IXQCCMP-NEXT: slli a0, a0, 32 |
| ; RV64IXQCCMP-NEXT: srli a0, a0, 32 |
| ; RV64IXQCCMP-NEXT: addi a0, a0, 15 |
| ; RV64IXQCCMP-NEXT: andi a0, a0, -16 |
| ; RV64IXQCCMP-NEXT: sub a0, sp, a0 |
| ; RV64IXQCCMP-NEXT: mv sp, a0 |
| ; RV64IXQCCMP-NEXT: addi sp, s0, -16 |
| ; RV64IXQCCMP-NEXT: .cfi_def_cfa sp, 16 |
| ; RV64IXQCCMP-NEXT: qc.cm.pop {ra, s0}, 16 |
| ; RV64IXQCCMP-NEXT: .cfi_restore ra |
| ; RV64IXQCCMP-NEXT: .cfi_restore s0 |
| ; RV64IXQCCMP-NEXT: .cfi_def_cfa_offset 0 |
| ; RV64IXQCCMP-NEXT: tail callee |
| ; |
| ; RV32IXQCCMP-FP-LABEL: tailcall: |
| ; RV32IXQCCMP-FP: # %bb.0: # %entry |
| ; RV32IXQCCMP-FP-NEXT: qc.cm.pushfp {ra, s0}, -16 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 16 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset ra, -4 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset s0, -8 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa s0, 0 |
| ; RV32IXQCCMP-FP-NEXT: addi a0, a0, 15 |
| ; RV32IXQCCMP-FP-NEXT: andi a0, a0, -16 |
| ; RV32IXQCCMP-FP-NEXT: sub a0, sp, a0 |
| ; RV32IXQCCMP-FP-NEXT: mv sp, a0 |
| ; RV32IXQCCMP-FP-NEXT: addi sp, s0, -16 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa sp, 16 |
| ; RV32IXQCCMP-FP-NEXT: qc.cm.pop {ra, s0}, 16 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_restore ra |
| ; RV32IXQCCMP-FP-NEXT: .cfi_restore s0 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 0 |
| ; RV32IXQCCMP-FP-NEXT: tail callee |
| ; |
| ; RV64IXQCCMP-FP-LABEL: tailcall: |
| ; RV64IXQCCMP-FP: # %bb.0: # %entry |
| ; RV64IXQCCMP-FP-NEXT: qc.cm.pushfp {ra, s0}, -16 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 16 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset ra, -8 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset s0, -16 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa s0, 0 |
| ; RV64IXQCCMP-FP-NEXT: slli a0, a0, 32 |
| ; RV64IXQCCMP-FP-NEXT: srli a0, a0, 32 |
| ; RV64IXQCCMP-FP-NEXT: addi a0, a0, 15 |
| ; RV64IXQCCMP-FP-NEXT: andi a0, a0, -16 |
| ; RV64IXQCCMP-FP-NEXT: sub a0, sp, a0 |
| ; RV64IXQCCMP-FP-NEXT: mv sp, a0 |
| ; RV64IXQCCMP-FP-NEXT: addi sp, s0, -16 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa sp, 16 |
| ; RV64IXQCCMP-FP-NEXT: qc.cm.pop {ra, s0}, 16 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_restore ra |
| ; RV64IXQCCMP-FP-NEXT: .cfi_restore s0 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 0 |
| ; RV64IXQCCMP-FP-NEXT: tail callee |
| ; |
| ; RV32IXQCCMP-SR-LABEL: tailcall: |
| ; RV32IXQCCMP-SR: # %bb.0: # %entry |
| ; RV32IXQCCMP-SR-NEXT: qc.cm.pushfp {ra, s0}, -16 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 16 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset ra, -4 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset s0, -8 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa s0, 0 |
| ; RV32IXQCCMP-SR-NEXT: addi a0, a0, 15 |
| ; RV32IXQCCMP-SR-NEXT: andi a0, a0, -16 |
| ; RV32IXQCCMP-SR-NEXT: sub a0, sp, a0 |
| ; RV32IXQCCMP-SR-NEXT: mv sp, a0 |
| ; RV32IXQCCMP-SR-NEXT: addi sp, s0, -16 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa sp, 16 |
| ; RV32IXQCCMP-SR-NEXT: qc.cm.pop {ra, s0}, 16 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_restore ra |
| ; RV32IXQCCMP-SR-NEXT: .cfi_restore s0 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 0 |
| ; RV32IXQCCMP-SR-NEXT: tail callee |
| ; |
| ; RV64IXQCCMP-SR-LABEL: tailcall: |
| ; RV64IXQCCMP-SR: # %bb.0: # %entry |
| ; RV64IXQCCMP-SR-NEXT: qc.cm.pushfp {ra, s0}, -16 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 16 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset ra, -8 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset s0, -16 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_def_cfa s0, 0 |
| ; RV64IXQCCMP-SR-NEXT: slli a0, a0, 32 |
| ; RV64IXQCCMP-SR-NEXT: srli a0, a0, 32 |
| ; RV64IXQCCMP-SR-NEXT: addi a0, a0, 15 |
| ; RV64IXQCCMP-SR-NEXT: andi a0, a0, -16 |
| ; RV64IXQCCMP-SR-NEXT: sub a0, sp, a0 |
| ; RV64IXQCCMP-SR-NEXT: mv sp, a0 |
| ; RV64IXQCCMP-SR-NEXT: addi sp, s0, -16 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_def_cfa sp, 16 |
| ; RV64IXQCCMP-SR-NEXT: qc.cm.pop {ra, s0}, 16 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_restore ra |
| ; RV64IXQCCMP-SR-NEXT: .cfi_restore s0 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 0 |
| ; RV64IXQCCMP-SR-NEXT: tail callee |
| entry: |
|   %0 = alloca i8, i32 %size, align 16 |
|   %1 = tail call i32 @callee(ptr nonnull %0) |
|   ret i32 %1 |
| } |
| |
| @var = global [5 x i32] zeroinitializer |
| define i32 @nocompress(i32 signext %size) { |
| ; RV32IXQCCMP-LABEL: nocompress: |
| ; RV32IXQCCMP: # %bb.0: # %entry |
| ; RV32IXQCCMP-NEXT: qc.cm.pushfp {ra, s0-s8}, -48 |
| ; RV32IXQCCMP-NEXT: .cfi_def_cfa_offset 48 |
| ; RV32IXQCCMP-NEXT: .cfi_offset ra, -4 |
| ; RV32IXQCCMP-NEXT: .cfi_offset s0, -8 |
| ; RV32IXQCCMP-NEXT: .cfi_offset s1, -12 |
| ; RV32IXQCCMP-NEXT: .cfi_offset s2, -16 |
| ; RV32IXQCCMP-NEXT: .cfi_offset s3, -20 |
| ; RV32IXQCCMP-NEXT: .cfi_offset s4, -24 |
| ; RV32IXQCCMP-NEXT: .cfi_offset s5, -28 |
| ; RV32IXQCCMP-NEXT: .cfi_offset s6, -32 |
| ; RV32IXQCCMP-NEXT: .cfi_offset s7, -36 |
| ; RV32IXQCCMP-NEXT: .cfi_offset s8, -40 |
| ; RV32IXQCCMP-NEXT: .cfi_def_cfa s0, 0 |
| ; RV32IXQCCMP-NEXT: addi a0, a0, 15 |
| ; RV32IXQCCMP-NEXT: andi a0, a0, -16 |
| ; RV32IXQCCMP-NEXT: sub s2, sp, a0 |
| ; RV32IXQCCMP-NEXT: mv sp, s2 |
| ; RV32IXQCCMP-NEXT: lui s1, %hi(var) |
| ; RV32IXQCCMP-NEXT: lw s3, %lo(var)(s1) |
| ; RV32IXQCCMP-NEXT: lw s4, %lo(var+4)(s1) |
| ; RV32IXQCCMP-NEXT: lw s5, %lo(var+8)(s1) |
| ; RV32IXQCCMP-NEXT: lw s6, %lo(var+12)(s1) |
| ; RV32IXQCCMP-NEXT: addi s7, s1, %lo(var) |
| ; RV32IXQCCMP-NEXT: lw s8, 16(s7) |
| ; RV32IXQCCMP-NEXT: mv a0, s2 |
| ; RV32IXQCCMP-NEXT: call callee_void |
| ; RV32IXQCCMP-NEXT: sw s8, 16(s7) |
| ; RV32IXQCCMP-NEXT: sw s6, %lo(var+12)(s1) |
| ; RV32IXQCCMP-NEXT: sw s5, %lo(var+8)(s1) |
| ; RV32IXQCCMP-NEXT: sw s4, %lo(var+4)(s1) |
| ; RV32IXQCCMP-NEXT: sw s3, %lo(var)(s1) |
| ; RV32IXQCCMP-NEXT: mv a0, s2 |
| ; RV32IXQCCMP-NEXT: addi sp, s0, -48 |
| ; RV32IXQCCMP-NEXT: .cfi_def_cfa sp, 48 |
| ; RV32IXQCCMP-NEXT: qc.cm.pop {ra, s0-s8}, 48 |
| ; RV32IXQCCMP-NEXT: .cfi_restore ra |
| ; RV32IXQCCMP-NEXT: .cfi_restore s0 |
| ; RV32IXQCCMP-NEXT: .cfi_restore s1 |
| ; RV32IXQCCMP-NEXT: .cfi_restore s2 |
| ; RV32IXQCCMP-NEXT: .cfi_restore s3 |
| ; RV32IXQCCMP-NEXT: .cfi_restore s4 |
| ; RV32IXQCCMP-NEXT: .cfi_restore s5 |
| ; RV32IXQCCMP-NEXT: .cfi_restore s6 |
| ; RV32IXQCCMP-NEXT: .cfi_restore s7 |
| ; RV32IXQCCMP-NEXT: .cfi_restore s8 |
| ; RV32IXQCCMP-NEXT: .cfi_def_cfa_offset 0 |
| ; RV32IXQCCMP-NEXT: tail callee |
| ; |
| ; RV64IXQCCMP-LABEL: nocompress: |
| ; RV64IXQCCMP: # %bb.0: # %entry |
| ; RV64IXQCCMP-NEXT: qc.cm.pushfp {ra, s0-s8}, -80 |
| ; RV64IXQCCMP-NEXT: .cfi_def_cfa_offset 80 |
| ; RV64IXQCCMP-NEXT: .cfi_offset ra, -8 |
| ; RV64IXQCCMP-NEXT: .cfi_offset s0, -16 |
| ; RV64IXQCCMP-NEXT: .cfi_offset s1, -24 |
| ; RV64IXQCCMP-NEXT: .cfi_offset s2, -32 |
| ; RV64IXQCCMP-NEXT: .cfi_offset s3, -40 |
| ; RV64IXQCCMP-NEXT: .cfi_offset s4, -48 |
| ; RV64IXQCCMP-NEXT: .cfi_offset s5, -56 |
| ; RV64IXQCCMP-NEXT: .cfi_offset s6, -64 |
| ; RV64IXQCCMP-NEXT: .cfi_offset s7, -72 |
| ; RV64IXQCCMP-NEXT: .cfi_offset s8, -80 |
| ; RV64IXQCCMP-NEXT: .cfi_def_cfa s0, 0 |
| ; RV64IXQCCMP-NEXT: slli a0, a0, 32 |
| ; RV64IXQCCMP-NEXT: srli a0, a0, 32 |
| ; RV64IXQCCMP-NEXT: addi a0, a0, 15 |
| ; RV64IXQCCMP-NEXT: andi a0, a0, -16 |
| ; RV64IXQCCMP-NEXT: sub s2, sp, a0 |
| ; RV64IXQCCMP-NEXT: mv sp, s2 |
| ; RV64IXQCCMP-NEXT: lui s1, %hi(var) |
| ; RV64IXQCCMP-NEXT: lw s3, %lo(var)(s1) |
| ; RV64IXQCCMP-NEXT: lw s4, %lo(var+4)(s1) |
| ; RV64IXQCCMP-NEXT: lw s5, %lo(var+8)(s1) |
| ; RV64IXQCCMP-NEXT: lw s6, %lo(var+12)(s1) |
| ; RV64IXQCCMP-NEXT: addi s7, s1, %lo(var) |
| ; RV64IXQCCMP-NEXT: lw s8, 16(s7) |
| ; RV64IXQCCMP-NEXT: mv a0, s2 |
| ; RV64IXQCCMP-NEXT: call callee_void |
| ; RV64IXQCCMP-NEXT: sw s8, 16(s7) |
| ; RV64IXQCCMP-NEXT: sw s6, %lo(var+12)(s1) |
| ; RV64IXQCCMP-NEXT: sw s5, %lo(var+8)(s1) |
| ; RV64IXQCCMP-NEXT: sw s4, %lo(var+4)(s1) |
| ; RV64IXQCCMP-NEXT: sw s3, %lo(var)(s1) |
| ; RV64IXQCCMP-NEXT: mv a0, s2 |
| ; RV64IXQCCMP-NEXT: addi sp, s0, -80 |
| ; RV64IXQCCMP-NEXT: .cfi_def_cfa sp, 80 |
| ; RV64IXQCCMP-NEXT: qc.cm.pop {ra, s0-s8}, 80 |
| ; RV64IXQCCMP-NEXT: .cfi_restore ra |
| ; RV64IXQCCMP-NEXT: .cfi_restore s0 |
| ; RV64IXQCCMP-NEXT: .cfi_restore s1 |
| ; RV64IXQCCMP-NEXT: .cfi_restore s2 |
| ; RV64IXQCCMP-NEXT: .cfi_restore s3 |
| ; RV64IXQCCMP-NEXT: .cfi_restore s4 |
| ; RV64IXQCCMP-NEXT: .cfi_restore s5 |
| ; RV64IXQCCMP-NEXT: .cfi_restore s6 |
| ; RV64IXQCCMP-NEXT: .cfi_restore s7 |
| ; RV64IXQCCMP-NEXT: .cfi_restore s8 |
| ; RV64IXQCCMP-NEXT: .cfi_def_cfa_offset 0 |
| ; RV64IXQCCMP-NEXT: tail callee |
| ; |
| ; RV32IXQCCMP-FP-LABEL: nocompress: |
| ; RV32IXQCCMP-FP: # %bb.0: # %entry |
| ; RV32IXQCCMP-FP-NEXT: qc.cm.pushfp {ra, s0-s8}, -48 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 48 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset ra, -4 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset s0, -8 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset s1, -12 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset s2, -16 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset s3, -20 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset s4, -24 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset s5, -28 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset s6, -32 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset s7, -36 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset s8, -40 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa s0, 0 |
| ; RV32IXQCCMP-FP-NEXT: addi a0, a0, 15 |
| ; RV32IXQCCMP-FP-NEXT: andi a0, a0, -16 |
| ; RV32IXQCCMP-FP-NEXT: sub s2, sp, a0 |
| ; RV32IXQCCMP-FP-NEXT: mv sp, s2 |
| ; RV32IXQCCMP-FP-NEXT: lui s1, %hi(var) |
| ; RV32IXQCCMP-FP-NEXT: lw s3, %lo(var)(s1) |
| ; RV32IXQCCMP-FP-NEXT: lw s4, %lo(var+4)(s1) |
| ; RV32IXQCCMP-FP-NEXT: lw s5, %lo(var+8)(s1) |
| ; RV32IXQCCMP-FP-NEXT: lw s6, %lo(var+12)(s1) |
| ; RV32IXQCCMP-FP-NEXT: addi s7, s1, %lo(var) |
| ; RV32IXQCCMP-FP-NEXT: lw s8, 16(s7) |
| ; RV32IXQCCMP-FP-NEXT: mv a0, s2 |
| ; RV32IXQCCMP-FP-NEXT: call callee_void |
| ; RV32IXQCCMP-FP-NEXT: sw s8, 16(s7) |
| ; RV32IXQCCMP-FP-NEXT: sw s6, %lo(var+12)(s1) |
| ; RV32IXQCCMP-FP-NEXT: sw s5, %lo(var+8)(s1) |
| ; RV32IXQCCMP-FP-NEXT: sw s4, %lo(var+4)(s1) |
| ; RV32IXQCCMP-FP-NEXT: sw s3, %lo(var)(s1) |
| ; RV32IXQCCMP-FP-NEXT: mv a0, s2 |
| ; RV32IXQCCMP-FP-NEXT: addi sp, s0, -48 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa sp, 48 |
| ; RV32IXQCCMP-FP-NEXT: qc.cm.pop {ra, s0-s8}, 48 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_restore ra |
| ; RV32IXQCCMP-FP-NEXT: .cfi_restore s0 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_restore s1 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_restore s2 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_restore s3 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_restore s4 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_restore s5 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_restore s6 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_restore s7 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_restore s8 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 0 |
| ; RV32IXQCCMP-FP-NEXT: tail callee |
| ; |
| ; RV64IXQCCMP-FP-LABEL: nocompress: |
| ; RV64IXQCCMP-FP: # %bb.0: # %entry |
| ; RV64IXQCCMP-FP-NEXT: qc.cm.pushfp {ra, s0-s8}, -80 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 80 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset ra, -8 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset s0, -16 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset s1, -24 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset s2, -32 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset s3, -40 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset s4, -48 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset s5, -56 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset s6, -64 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset s7, -72 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset s8, -80 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa s0, 0 |
| ; RV64IXQCCMP-FP-NEXT: slli a0, a0, 32 |
| ; RV64IXQCCMP-FP-NEXT: srli a0, a0, 32 |
| ; RV64IXQCCMP-FP-NEXT: addi a0, a0, 15 |
| ; RV64IXQCCMP-FP-NEXT: andi a0, a0, -16 |
| ; RV64IXQCCMP-FP-NEXT: sub s2, sp, a0 |
| ; RV64IXQCCMP-FP-NEXT: mv sp, s2 |
| ; RV64IXQCCMP-FP-NEXT: lui s1, %hi(var) |
| ; RV64IXQCCMP-FP-NEXT: lw s3, %lo(var)(s1) |
| ; RV64IXQCCMP-FP-NEXT: lw s4, %lo(var+4)(s1) |
| ; RV64IXQCCMP-FP-NEXT: lw s5, %lo(var+8)(s1) |
| ; RV64IXQCCMP-FP-NEXT: lw s6, %lo(var+12)(s1) |
| ; RV64IXQCCMP-FP-NEXT: addi s7, s1, %lo(var) |
| ; RV64IXQCCMP-FP-NEXT: lw s8, 16(s7) |
| ; RV64IXQCCMP-FP-NEXT: mv a0, s2 |
| ; RV64IXQCCMP-FP-NEXT: call callee_void |
| ; RV64IXQCCMP-FP-NEXT: sw s8, 16(s7) |
| ; RV64IXQCCMP-FP-NEXT: sw s6, %lo(var+12)(s1) |
| ; RV64IXQCCMP-FP-NEXT: sw s5, %lo(var+8)(s1) |
| ; RV64IXQCCMP-FP-NEXT: sw s4, %lo(var+4)(s1) |
| ; RV64IXQCCMP-FP-NEXT: sw s3, %lo(var)(s1) |
| ; RV64IXQCCMP-FP-NEXT: mv a0, s2 |
| ; RV64IXQCCMP-FP-NEXT: addi sp, s0, -80 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa sp, 80 |
| ; RV64IXQCCMP-FP-NEXT: qc.cm.pop {ra, s0-s8}, 80 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_restore ra |
| ; RV64IXQCCMP-FP-NEXT: .cfi_restore s0 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_restore s1 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_restore s2 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_restore s3 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_restore s4 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_restore s5 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_restore s6 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_restore s7 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_restore s8 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 0 |
| ; RV64IXQCCMP-FP-NEXT: tail callee |
| ; |
| ; RV32IXQCCMP-SR-LABEL: nocompress: |
| ; RV32IXQCCMP-SR: # %bb.0: # %entry |
| ; RV32IXQCCMP-SR-NEXT: qc.cm.pushfp {ra, s0-s8}, -48 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 48 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset ra, -4 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset s0, -8 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset s1, -12 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset s2, -16 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset s3, -20 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset s4, -24 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset s5, -28 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset s6, -32 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset s7, -36 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset s8, -40 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa s0, 0 |
| ; RV32IXQCCMP-SR-NEXT: addi a0, a0, 15 |
| ; RV32IXQCCMP-SR-NEXT: andi a0, a0, -16 |
| ; RV32IXQCCMP-SR-NEXT: sub s2, sp, a0 |
| ; RV32IXQCCMP-SR-NEXT: mv sp, s2 |
| ; RV32IXQCCMP-SR-NEXT: lui s1, %hi(var) |
| ; RV32IXQCCMP-SR-NEXT: lw s3, %lo(var)(s1) |
| ; RV32IXQCCMP-SR-NEXT: lw s4, %lo(var+4)(s1) |
| ; RV32IXQCCMP-SR-NEXT: lw s5, %lo(var+8)(s1) |
| ; RV32IXQCCMP-SR-NEXT: lw s6, %lo(var+12)(s1) |
| ; RV32IXQCCMP-SR-NEXT: addi s7, s1, %lo(var) |
| ; RV32IXQCCMP-SR-NEXT: lw s8, 16(s7) |
| ; RV32IXQCCMP-SR-NEXT: mv a0, s2 |
| ; RV32IXQCCMP-SR-NEXT: call callee_void |
| ; RV32IXQCCMP-SR-NEXT: sw s8, 16(s7) |
| ; RV32IXQCCMP-SR-NEXT: sw s6, %lo(var+12)(s1) |
| ; RV32IXQCCMP-SR-NEXT: sw s5, %lo(var+8)(s1) |
| ; RV32IXQCCMP-SR-NEXT: sw s4, %lo(var+4)(s1) |
| ; RV32IXQCCMP-SR-NEXT: sw s3, %lo(var)(s1) |
| ; RV32IXQCCMP-SR-NEXT: mv a0, s2 |
| ; RV32IXQCCMP-SR-NEXT: addi sp, s0, -48 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa sp, 48 |
| ; RV32IXQCCMP-SR-NEXT: qc.cm.pop {ra, s0-s8}, 48 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_restore ra |
| ; RV32IXQCCMP-SR-NEXT: .cfi_restore s0 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_restore s1 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_restore s2 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_restore s3 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_restore s4 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_restore s5 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_restore s6 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_restore s7 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_restore s8 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 0 |
| ; RV32IXQCCMP-SR-NEXT: tail callee |
| ; |
| ; RV64IXQCCMP-SR-LABEL: nocompress: |
| ; RV64IXQCCMP-SR: # %bb.0: # %entry |
| ; RV64IXQCCMP-SR-NEXT: qc.cm.pushfp {ra, s0-s8}, -80 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 80 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset ra, -8 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset s0, -16 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset s1, -24 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset s2, -32 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset s3, -40 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset s4, -48 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset s5, -56 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset s6, -64 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset s7, -72 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset s8, -80 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_def_cfa s0, 0 |
| ; RV64IXQCCMP-SR-NEXT: slli a0, a0, 32 |
| ; RV64IXQCCMP-SR-NEXT: srli a0, a0, 32 |
| ; RV64IXQCCMP-SR-NEXT: addi a0, a0, 15 |
| ; RV64IXQCCMP-SR-NEXT: andi a0, a0, -16 |
| ; RV64IXQCCMP-SR-NEXT: sub s2, sp, a0 |
| ; RV64IXQCCMP-SR-NEXT: mv sp, s2 |
| ; RV64IXQCCMP-SR-NEXT: lui s1, %hi(var) |
| ; RV64IXQCCMP-SR-NEXT: lw s3, %lo(var)(s1) |
| ; RV64IXQCCMP-SR-NEXT: lw s4, %lo(var+4)(s1) |
| ; RV64IXQCCMP-SR-NEXT: lw s5, %lo(var+8)(s1) |
| ; RV64IXQCCMP-SR-NEXT: lw s6, %lo(var+12)(s1) |
| ; RV64IXQCCMP-SR-NEXT: addi s7, s1, %lo(var) |
| ; RV64IXQCCMP-SR-NEXT: lw s8, 16(s7) |
| ; RV64IXQCCMP-SR-NEXT: mv a0, s2 |
| ; RV64IXQCCMP-SR-NEXT: call callee_void |
| ; RV64IXQCCMP-SR-NEXT: sw s8, 16(s7) |
| ; RV64IXQCCMP-SR-NEXT: sw s6, %lo(var+12)(s1) |
| ; RV64IXQCCMP-SR-NEXT: sw s5, %lo(var+8)(s1) |
| ; RV64IXQCCMP-SR-NEXT: sw s4, %lo(var+4)(s1) |
| ; RV64IXQCCMP-SR-NEXT: sw s3, %lo(var)(s1) |
| ; RV64IXQCCMP-SR-NEXT: mv a0, s2 |
| ; RV64IXQCCMP-SR-NEXT: addi sp, s0, -80 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_def_cfa sp, 80 |
| ; RV64IXQCCMP-SR-NEXT: qc.cm.pop {ra, s0-s8}, 80 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_restore ra |
| ; RV64IXQCCMP-SR-NEXT: .cfi_restore s0 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_restore s1 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_restore s2 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_restore s3 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_restore s4 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_restore s5 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_restore s6 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_restore s7 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_restore s8 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 0 |
| ; RV64IXQCCMP-SR-NEXT: tail callee |
| entry: |
| %0 = alloca i8, i32 %size, align 16 |
| %val = load [5 x i32], ptr @var |
| call void @callee_void(ptr nonnull %0) |
| store volatile [5 x i32] %val, ptr @var |
| %1 = tail call i32 @callee(ptr nonnull %0) |
| ret i32 %1 |
| } |
| |
; Check that functions with varargs do not use Xqccmp push/pop or save/restore
; code, since the anonymous argument registers must be spilled to the stack.
| |
| declare void @llvm.va_start(ptr) |
| declare void @llvm.va_end(ptr) |
| |
; Returns the first variadic argument as an i32. A varargs function spills
; a1-a7 to its own frame, so no qc.cm.push/qc.cm.popret (or save/restore
; libcalls) appear in any of the check prefixes below.
define i32 @varargs(ptr %fmt, ...) {
; RV32IXQCCMP-LABEL: varargs:
; RV32IXQCCMP: # %bb.0:
; RV32IXQCCMP-NEXT: addi sp, sp, -48
; RV32IXQCCMP-NEXT: .cfi_def_cfa_offset 48
; RV32IXQCCMP-NEXT: mv a0, a1
; RV32IXQCCMP-NEXT: sw a5, 36(sp)
; RV32IXQCCMP-NEXT: sw a6, 40(sp)
; RV32IXQCCMP-NEXT: sw a7, 44(sp)
; RV32IXQCCMP-NEXT: sw a1, 20(sp)
; RV32IXQCCMP-NEXT: sw a2, 24(sp)
; RV32IXQCCMP-NEXT: sw a3, 28(sp)
; RV32IXQCCMP-NEXT: sw a4, 32(sp)
; RV32IXQCCMP-NEXT: addi a1, sp, 24
; RV32IXQCCMP-NEXT: sw a1, 12(sp)
; RV32IXQCCMP-NEXT: addi sp, sp, 48
; RV32IXQCCMP-NEXT: .cfi_def_cfa_offset 0
; RV32IXQCCMP-NEXT: ret
;
; RV64IXQCCMP-LABEL: varargs:
; RV64IXQCCMP: # %bb.0:
; RV64IXQCCMP-NEXT: addi sp, sp, -80
; RV64IXQCCMP-NEXT: .cfi_def_cfa_offset 80
; RV64IXQCCMP-NEXT: sd a1, 24(sp)
; RV64IXQCCMP-NEXT: addi a0, sp, 28
; RV64IXQCCMP-NEXT: sd a0, 8(sp)
; RV64IXQCCMP-NEXT: lw a0, 24(sp)
; RV64IXQCCMP-NEXT: sd a5, 56(sp)
; RV64IXQCCMP-NEXT: sd a6, 64(sp)
; RV64IXQCCMP-NEXT: sd a7, 72(sp)
; RV64IXQCCMP-NEXT: sd a2, 32(sp)
; RV64IXQCCMP-NEXT: sd a3, 40(sp)
; RV64IXQCCMP-NEXT: sd a4, 48(sp)
; RV64IXQCCMP-NEXT: addi sp, sp, 80
; RV64IXQCCMP-NEXT: .cfi_def_cfa_offset 0
; RV64IXQCCMP-NEXT: ret
;
; RV32IXQCCMP-FP-LABEL: varargs:
; RV32IXQCCMP-FP: # %bb.0:
; RV32IXQCCMP-FP-NEXT: addi sp, sp, -48
; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 48
; RV32IXQCCMP-FP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IXQCCMP-FP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IXQCCMP-FP-NEXT: .cfi_offset ra, -36
; RV32IXQCCMP-FP-NEXT: .cfi_offset s0, -40
; RV32IXQCCMP-FP-NEXT: addi s0, sp, 16
; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa s0, 32
; RV32IXQCCMP-FP-NEXT: mv a0, a1
; RV32IXQCCMP-FP-NEXT: sw a5, 20(s0)
; RV32IXQCCMP-FP-NEXT: sw a6, 24(s0)
; RV32IXQCCMP-FP-NEXT: sw a7, 28(s0)
; RV32IXQCCMP-FP-NEXT: sw a1, 4(s0)
; RV32IXQCCMP-FP-NEXT: sw a2, 8(s0)
; RV32IXQCCMP-FP-NEXT: sw a3, 12(s0)
; RV32IXQCCMP-FP-NEXT: sw a4, 16(s0)
; RV32IXQCCMP-FP-NEXT: addi a1, s0, 8
; RV32IXQCCMP-FP-NEXT: sw a1, -12(s0)
; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa sp, 48
; RV32IXQCCMP-FP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IXQCCMP-FP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IXQCCMP-FP-NEXT: .cfi_restore ra
; RV32IXQCCMP-FP-NEXT: .cfi_restore s0
; RV32IXQCCMP-FP-NEXT: addi sp, sp, 48
; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 0
; RV32IXQCCMP-FP-NEXT: ret
;
; RV64IXQCCMP-FP-LABEL: varargs:
; RV64IXQCCMP-FP: # %bb.0:
; RV64IXQCCMP-FP-NEXT: addi sp, sp, -96
; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 96
; RV64IXQCCMP-FP-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64IXQCCMP-FP-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64IXQCCMP-FP-NEXT: .cfi_offset ra, -72
; RV64IXQCCMP-FP-NEXT: .cfi_offset s0, -80
; RV64IXQCCMP-FP-NEXT: addi s0, sp, 32
; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa s0, 64
; RV64IXQCCMP-FP-NEXT: sd a1, 8(s0)
; RV64IXQCCMP-FP-NEXT: addi a0, s0, 12
; RV64IXQCCMP-FP-NEXT: sd a0, -24(s0)
; RV64IXQCCMP-FP-NEXT: lw a0, 8(s0)
; RV64IXQCCMP-FP-NEXT: sd a5, 40(s0)
; RV64IXQCCMP-FP-NEXT: sd a6, 48(s0)
; RV64IXQCCMP-FP-NEXT: sd a7, 56(s0)
; RV64IXQCCMP-FP-NEXT: sd a2, 16(s0)
; RV64IXQCCMP-FP-NEXT: sd a3, 24(s0)
; RV64IXQCCMP-FP-NEXT: sd a4, 32(s0)
; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa sp, 96
; RV64IXQCCMP-FP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64IXQCCMP-FP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64IXQCCMP-FP-NEXT: .cfi_restore ra
; RV64IXQCCMP-FP-NEXT: .cfi_restore s0
; RV64IXQCCMP-FP-NEXT: addi sp, sp, 96
; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 0
; RV64IXQCCMP-FP-NEXT: ret
;
; RV32IXQCCMP-SR-LABEL: varargs:
; RV32IXQCCMP-SR: # %bb.0:
; RV32IXQCCMP-SR-NEXT: addi sp, sp, -48
; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 48
; RV32IXQCCMP-SR-NEXT: mv a0, a1
; RV32IXQCCMP-SR-NEXT: sw a5, 36(sp)
; RV32IXQCCMP-SR-NEXT: sw a6, 40(sp)
; RV32IXQCCMP-SR-NEXT: sw a7, 44(sp)
; RV32IXQCCMP-SR-NEXT: sw a1, 20(sp)
; RV32IXQCCMP-SR-NEXT: sw a2, 24(sp)
; RV32IXQCCMP-SR-NEXT: sw a3, 28(sp)
; RV32IXQCCMP-SR-NEXT: sw a4, 32(sp)
; RV32IXQCCMP-SR-NEXT: addi a1, sp, 24
; RV32IXQCCMP-SR-NEXT: sw a1, 12(sp)
; RV32IXQCCMP-SR-NEXT: addi sp, sp, 48
; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 0
; RV32IXQCCMP-SR-NEXT: ret
;
; RV64IXQCCMP-SR-LABEL: varargs:
; RV64IXQCCMP-SR: # %bb.0:
; RV64IXQCCMP-SR-NEXT: addi sp, sp, -80
; RV64IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 80
; RV64IXQCCMP-SR-NEXT: sd a1, 24(sp)
; RV64IXQCCMP-SR-NEXT: addi a0, sp, 28
; RV64IXQCCMP-SR-NEXT: sd a0, 8(sp)
; RV64IXQCCMP-SR-NEXT: lw a0, 24(sp)
; RV64IXQCCMP-SR-NEXT: sd a5, 56(sp)
; RV64IXQCCMP-SR-NEXT: sd a6, 64(sp)
; RV64IXQCCMP-SR-NEXT: sd a7, 72(sp)
; RV64IXQCCMP-SR-NEXT: sd a2, 32(sp)
; RV64IXQCCMP-SR-NEXT: sd a3, 40(sp)
; RV64IXQCCMP-SR-NEXT: sd a4, 48(sp)
; RV64IXQCCMP-SR-NEXT: addi sp, sp, 80
; RV64IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 0
; RV64IXQCCMP-SR-NEXT: ret
  %va = alloca ptr
  call void @llvm.va_start(ptr %va)
  ; Read the first vararg and advance the va_list by 4 bytes (one i32).
  %argp.cur = load ptr, ptr %va
  %argp.next = getelementptr inbounds i8, ptr %argp.cur, i32 4
  store ptr %argp.next, ptr %va
  %1 = load i32, ptr %argp.cur
  call void @llvm.va_end(ptr %va)
  ret i32 %1
}
| |
| @var0 = global [18 x i32] zeroinitializer |
| |
; Loads and volatile-stores all 18 elements of @var0, keeping many values live
; at once so callee-saved registers are required; the prologue/epilogue use
; qc.cm.push / qc.cm.popret (pushfp under -frame-pointer=all).
define void @many_args(i32, i32, i32, i32, i32, i32, i32, i32, i32) {
; RV32IXQCCMP-LABEL: many_args:
; RV32IXQCCMP: # %bb.0: # %entry
; RV32IXQCCMP-NEXT: qc.cm.push {ra, s0-s4}, -32
; RV32IXQCCMP-NEXT: .cfi_def_cfa_offset 32
; RV32IXQCCMP-NEXT: .cfi_offset s0, -8
; RV32IXQCCMP-NEXT: .cfi_offset s1, -12
; RV32IXQCCMP-NEXT: .cfi_offset s2, -16
; RV32IXQCCMP-NEXT: .cfi_offset s3, -20
; RV32IXQCCMP-NEXT: .cfi_offset s4, -24
; RV32IXQCCMP-NEXT: lui a0, %hi(var0)
; RV32IXQCCMP-NEXT: lw a6, %lo(var0)(a0)
; RV32IXQCCMP-NEXT: lw a7, %lo(var0+4)(a0)
; RV32IXQCCMP-NEXT: lw t0, %lo(var0+8)(a0)
; RV32IXQCCMP-NEXT: lw t1, %lo(var0+12)(a0)
; RV32IXQCCMP-NEXT: addi a5, a0, %lo(var0)
; RV32IXQCCMP-NEXT: lw t2, 16(a5)
; RV32IXQCCMP-NEXT: lw t3, 20(a5)
; RV32IXQCCMP-NEXT: lw t4, 24(a5)
; RV32IXQCCMP-NEXT: lw t5, 28(a5)
; RV32IXQCCMP-NEXT: lw t6, 48(a5)
; RV32IXQCCMP-NEXT: lw s2, 52(a5)
; RV32IXQCCMP-NEXT: lw a3, 56(a5)
; RV32IXQCCMP-NEXT: lw a4, 60(a5)
; RV32IXQCCMP-NEXT: lw a1, 64(a5)
; RV32IXQCCMP-NEXT: lw s0, 68(a5)
; RV32IXQCCMP-NEXT: lw s3, 32(a5)
; RV32IXQCCMP-NEXT: lw s4, 36(a5)
; RV32IXQCCMP-NEXT: lw s1, 40(a5)
; RV32IXQCCMP-NEXT: lw a2, 44(a5)
; RV32IXQCCMP-NEXT: sw s0, 68(a5)
; RV32IXQCCMP-NEXT: sw a1, 64(a5)
; RV32IXQCCMP-NEXT: sw a4, 60(a5)
; RV32IXQCCMP-NEXT: sw a3, 56(a5)
; RV32IXQCCMP-NEXT: sw s2, 52(a5)
; RV32IXQCCMP-NEXT: sw t6, 48(a5)
; RV32IXQCCMP-NEXT: sw a2, 44(a5)
; RV32IXQCCMP-NEXT: sw s1, 40(a5)
; RV32IXQCCMP-NEXT: sw s4, 36(a5)
; RV32IXQCCMP-NEXT: sw s3, 32(a5)
; RV32IXQCCMP-NEXT: sw t5, 28(a5)
; RV32IXQCCMP-NEXT: sw t4, 24(a5)
; RV32IXQCCMP-NEXT: sw t3, 20(a5)
; RV32IXQCCMP-NEXT: sw t2, 16(a5)
; RV32IXQCCMP-NEXT: sw t1, %lo(var0+12)(a0)
; RV32IXQCCMP-NEXT: sw t0, %lo(var0+8)(a0)
; RV32IXQCCMP-NEXT: sw a7, %lo(var0+4)(a0)
; RV32IXQCCMP-NEXT: sw a6, %lo(var0)(a0)
; RV32IXQCCMP-NEXT: qc.cm.popret {ra, s0-s4}, 32
;
; RV64IXQCCMP-LABEL: many_args:
; RV64IXQCCMP: # %bb.0: # %entry
; RV64IXQCCMP-NEXT: qc.cm.push {ra, s0-s4}, -48
; RV64IXQCCMP-NEXT: .cfi_def_cfa_offset 48
; RV64IXQCCMP-NEXT: .cfi_offset s0, -16
; RV64IXQCCMP-NEXT: .cfi_offset s1, -24
; RV64IXQCCMP-NEXT: .cfi_offset s2, -32
; RV64IXQCCMP-NEXT: .cfi_offset s3, -40
; RV64IXQCCMP-NEXT: .cfi_offset s4, -48
; RV64IXQCCMP-NEXT: lui a0, %hi(var0)
; RV64IXQCCMP-NEXT: lw a6, %lo(var0)(a0)
; RV64IXQCCMP-NEXT: lw a7, %lo(var0+4)(a0)
; RV64IXQCCMP-NEXT: lw t0, %lo(var0+8)(a0)
; RV64IXQCCMP-NEXT: lw t1, %lo(var0+12)(a0)
; RV64IXQCCMP-NEXT: addi a5, a0, %lo(var0)
; RV64IXQCCMP-NEXT: lw t2, 16(a5)
; RV64IXQCCMP-NEXT: lw t3, 20(a5)
; RV64IXQCCMP-NEXT: lw t4, 24(a5)
; RV64IXQCCMP-NEXT: lw t5, 28(a5)
; RV64IXQCCMP-NEXT: lw t6, 48(a5)
; RV64IXQCCMP-NEXT: lw s2, 52(a5)
; RV64IXQCCMP-NEXT: lw a3, 56(a5)
; RV64IXQCCMP-NEXT: lw a4, 60(a5)
; RV64IXQCCMP-NEXT: lw a1, 64(a5)
; RV64IXQCCMP-NEXT: lw s0, 68(a5)
; RV64IXQCCMP-NEXT: lw s3, 32(a5)
; RV64IXQCCMP-NEXT: lw s4, 36(a5)
; RV64IXQCCMP-NEXT: lw s1, 40(a5)
; RV64IXQCCMP-NEXT: lw a2, 44(a5)
; RV64IXQCCMP-NEXT: sw s0, 68(a5)
; RV64IXQCCMP-NEXT: sw a1, 64(a5)
; RV64IXQCCMP-NEXT: sw a4, 60(a5)
; RV64IXQCCMP-NEXT: sw a3, 56(a5)
; RV64IXQCCMP-NEXT: sw s2, 52(a5)
; RV64IXQCCMP-NEXT: sw t6, 48(a5)
; RV64IXQCCMP-NEXT: sw a2, 44(a5)
; RV64IXQCCMP-NEXT: sw s1, 40(a5)
; RV64IXQCCMP-NEXT: sw s4, 36(a5)
; RV64IXQCCMP-NEXT: sw s3, 32(a5)
; RV64IXQCCMP-NEXT: sw t5, 28(a5)
; RV64IXQCCMP-NEXT: sw t4, 24(a5)
; RV64IXQCCMP-NEXT: sw t3, 20(a5)
; RV64IXQCCMP-NEXT: sw t2, 16(a5)
; RV64IXQCCMP-NEXT: sw t1, %lo(var0+12)(a0)
; RV64IXQCCMP-NEXT: sw t0, %lo(var0+8)(a0)
; RV64IXQCCMP-NEXT: sw a7, %lo(var0+4)(a0)
; RV64IXQCCMP-NEXT: sw a6, %lo(var0)(a0)
; RV64IXQCCMP-NEXT: qc.cm.popret {ra, s0-s4}, 48
;
; RV32IXQCCMP-FP-LABEL: many_args:
; RV32IXQCCMP-FP: # %bb.0: # %entry
; RV32IXQCCMP-FP-NEXT: qc.cm.pushfp {ra, s0-s5}, -32
; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 32
; RV32IXQCCMP-FP-NEXT: .cfi_offset ra, -4
; RV32IXQCCMP-FP-NEXT: .cfi_offset s0, -8
; RV32IXQCCMP-FP-NEXT: .cfi_offset s1, -12
; RV32IXQCCMP-FP-NEXT: .cfi_offset s2, -16
; RV32IXQCCMP-FP-NEXT: .cfi_offset s3, -20
; RV32IXQCCMP-FP-NEXT: .cfi_offset s4, -24
; RV32IXQCCMP-FP-NEXT: .cfi_offset s5, -28
; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa s0, 0
; RV32IXQCCMP-FP-NEXT: lui a0, %hi(var0)
; RV32IXQCCMP-FP-NEXT: lw a6, %lo(var0)(a0)
; RV32IXQCCMP-FP-NEXT: lw a7, %lo(var0+4)(a0)
; RV32IXQCCMP-FP-NEXT: lw t0, %lo(var0+8)(a0)
; RV32IXQCCMP-FP-NEXT: lw t1, %lo(var0+12)(a0)
; RV32IXQCCMP-FP-NEXT: addi a5, a0, %lo(var0)
; RV32IXQCCMP-FP-NEXT: lw t2, 16(a5)
; RV32IXQCCMP-FP-NEXT: lw t3, 20(a5)
; RV32IXQCCMP-FP-NEXT: lw t4, 24(a5)
; RV32IXQCCMP-FP-NEXT: lw t5, 28(a5)
; RV32IXQCCMP-FP-NEXT: lw t6, 48(a5)
; RV32IXQCCMP-FP-NEXT: lw s3, 52(a5)
; RV32IXQCCMP-FP-NEXT: lw s5, 56(a5)
; RV32IXQCCMP-FP-NEXT: lw a4, 60(a5)
; RV32IXQCCMP-FP-NEXT: lw a1, 64(a5)
; RV32IXQCCMP-FP-NEXT: lw s1, 68(a5)
; RV32IXQCCMP-FP-NEXT: lw s2, 32(a5)
; RV32IXQCCMP-FP-NEXT: lw s4, 36(a5)
; RV32IXQCCMP-FP-NEXT: lw a2, 40(a5)
; RV32IXQCCMP-FP-NEXT: lw a3, 44(a5)
; RV32IXQCCMP-FP-NEXT: sw s1, 68(a5)
; RV32IXQCCMP-FP-NEXT: sw a1, 64(a5)
; RV32IXQCCMP-FP-NEXT: sw a4, 60(a5)
; RV32IXQCCMP-FP-NEXT: sw s5, 56(a5)
; RV32IXQCCMP-FP-NEXT: sw s3, 52(a5)
; RV32IXQCCMP-FP-NEXT: sw t6, 48(a5)
; RV32IXQCCMP-FP-NEXT: sw a3, 44(a5)
; RV32IXQCCMP-FP-NEXT: sw a2, 40(a5)
; RV32IXQCCMP-FP-NEXT: sw s4, 36(a5)
; RV32IXQCCMP-FP-NEXT: sw s2, 32(a5)
; RV32IXQCCMP-FP-NEXT: sw t5, 28(a5)
; RV32IXQCCMP-FP-NEXT: sw t4, 24(a5)
; RV32IXQCCMP-FP-NEXT: sw t3, 20(a5)
; RV32IXQCCMP-FP-NEXT: sw t2, 16(a5)
; RV32IXQCCMP-FP-NEXT: sw t1, %lo(var0+12)(a0)
; RV32IXQCCMP-FP-NEXT: sw t0, %lo(var0+8)(a0)
; RV32IXQCCMP-FP-NEXT: sw a7, %lo(var0+4)(a0)
; RV32IXQCCMP-FP-NEXT: sw a6, %lo(var0)(a0)
; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa sp, 32
; RV32IXQCCMP-FP-NEXT: qc.cm.popret {ra, s0-s5}, 32
;
; RV64IXQCCMP-FP-LABEL: many_args:
; RV64IXQCCMP-FP: # %bb.0: # %entry
; RV64IXQCCMP-FP-NEXT: qc.cm.pushfp {ra, s0-s5}, -64
; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 64
; RV64IXQCCMP-FP-NEXT: .cfi_offset ra, -8
; RV64IXQCCMP-FP-NEXT: .cfi_offset s0, -16
; RV64IXQCCMP-FP-NEXT: .cfi_offset s1, -24
; RV64IXQCCMP-FP-NEXT: .cfi_offset s2, -32
; RV64IXQCCMP-FP-NEXT: .cfi_offset s3, -40
; RV64IXQCCMP-FP-NEXT: .cfi_offset s4, -48
; RV64IXQCCMP-FP-NEXT: .cfi_offset s5, -56
; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa s0, 0
; RV64IXQCCMP-FP-NEXT: lui a0, %hi(var0)
; RV64IXQCCMP-FP-NEXT: lw a6, %lo(var0)(a0)
; RV64IXQCCMP-FP-NEXT: lw a7, %lo(var0+4)(a0)
; RV64IXQCCMP-FP-NEXT: lw t0, %lo(var0+8)(a0)
; RV64IXQCCMP-FP-NEXT: lw t1, %lo(var0+12)(a0)
; RV64IXQCCMP-FP-NEXT: addi a5, a0, %lo(var0)
; RV64IXQCCMP-FP-NEXT: lw t2, 16(a5)
; RV64IXQCCMP-FP-NEXT: lw t3, 20(a5)
; RV64IXQCCMP-FP-NEXT: lw t4, 24(a5)
; RV64IXQCCMP-FP-NEXT: lw t5, 28(a5)
; RV64IXQCCMP-FP-NEXT: lw t6, 48(a5)
; RV64IXQCCMP-FP-NEXT: lw s3, 52(a5)
; RV64IXQCCMP-FP-NEXT: lw s5, 56(a5)
; RV64IXQCCMP-FP-NEXT: lw a4, 60(a5)
; RV64IXQCCMP-FP-NEXT: lw a1, 64(a5)
; RV64IXQCCMP-FP-NEXT: lw s1, 68(a5)
; RV64IXQCCMP-FP-NEXT: lw s2, 32(a5)
; RV64IXQCCMP-FP-NEXT: lw s4, 36(a5)
; RV64IXQCCMP-FP-NEXT: lw a2, 40(a5)
; RV64IXQCCMP-FP-NEXT: lw a3, 44(a5)
; RV64IXQCCMP-FP-NEXT: sw s1, 68(a5)
; RV64IXQCCMP-FP-NEXT: sw a1, 64(a5)
; RV64IXQCCMP-FP-NEXT: sw a4, 60(a5)
; RV64IXQCCMP-FP-NEXT: sw s5, 56(a5)
; RV64IXQCCMP-FP-NEXT: sw s3, 52(a5)
; RV64IXQCCMP-FP-NEXT: sw t6, 48(a5)
; RV64IXQCCMP-FP-NEXT: sw a3, 44(a5)
; RV64IXQCCMP-FP-NEXT: sw a2, 40(a5)
; RV64IXQCCMP-FP-NEXT: sw s4, 36(a5)
; RV64IXQCCMP-FP-NEXT: sw s2, 32(a5)
; RV64IXQCCMP-FP-NEXT: sw t5, 28(a5)
; RV64IXQCCMP-FP-NEXT: sw t4, 24(a5)
; RV64IXQCCMP-FP-NEXT: sw t3, 20(a5)
; RV64IXQCCMP-FP-NEXT: sw t2, 16(a5)
; RV64IXQCCMP-FP-NEXT: sw t1, %lo(var0+12)(a0)
; RV64IXQCCMP-FP-NEXT: sw t0, %lo(var0+8)(a0)
; RV64IXQCCMP-FP-NEXT: sw a7, %lo(var0+4)(a0)
; RV64IXQCCMP-FP-NEXT: sw a6, %lo(var0)(a0)
; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa sp, 64
; RV64IXQCCMP-FP-NEXT: qc.cm.popret {ra, s0-s5}, 64
;
; RV32IXQCCMP-SR-LABEL: many_args:
; RV32IXQCCMP-SR: # %bb.0: # %entry
; RV32IXQCCMP-SR-NEXT: qc.cm.push {ra, s0-s4}, -32
; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 32
; RV32IXQCCMP-SR-NEXT: .cfi_offset s0, -8
; RV32IXQCCMP-SR-NEXT: .cfi_offset s1, -12
; RV32IXQCCMP-SR-NEXT: .cfi_offset s2, -16
; RV32IXQCCMP-SR-NEXT: .cfi_offset s3, -20
; RV32IXQCCMP-SR-NEXT: .cfi_offset s4, -24
; RV32IXQCCMP-SR-NEXT: lui a0, %hi(var0)
; RV32IXQCCMP-SR-NEXT: lw a6, %lo(var0)(a0)
; RV32IXQCCMP-SR-NEXT: lw a7, %lo(var0+4)(a0)
; RV32IXQCCMP-SR-NEXT: lw t0, %lo(var0+8)(a0)
; RV32IXQCCMP-SR-NEXT: lw t1, %lo(var0+12)(a0)
; RV32IXQCCMP-SR-NEXT: addi a5, a0, %lo(var0)
; RV32IXQCCMP-SR-NEXT: lw t2, 16(a5)
; RV32IXQCCMP-SR-NEXT: lw t3, 20(a5)
; RV32IXQCCMP-SR-NEXT: lw t4, 24(a5)
; RV32IXQCCMP-SR-NEXT: lw t5, 28(a5)
; RV32IXQCCMP-SR-NEXT: lw t6, 48(a5)
; RV32IXQCCMP-SR-NEXT: lw s2, 52(a5)
; RV32IXQCCMP-SR-NEXT: lw a3, 56(a5)
; RV32IXQCCMP-SR-NEXT: lw a4, 60(a5)
; RV32IXQCCMP-SR-NEXT: lw a1, 64(a5)
; RV32IXQCCMP-SR-NEXT: lw s0, 68(a5)
; RV32IXQCCMP-SR-NEXT: lw s3, 32(a5)
; RV32IXQCCMP-SR-NEXT: lw s4, 36(a5)
; RV32IXQCCMP-SR-NEXT: lw s1, 40(a5)
; RV32IXQCCMP-SR-NEXT: lw a2, 44(a5)
; RV32IXQCCMP-SR-NEXT: sw s0, 68(a5)
; RV32IXQCCMP-SR-NEXT: sw a1, 64(a5)
; RV32IXQCCMP-SR-NEXT: sw a4, 60(a5)
; RV32IXQCCMP-SR-NEXT: sw a3, 56(a5)
; RV32IXQCCMP-SR-NEXT: sw s2, 52(a5)
; RV32IXQCCMP-SR-NEXT: sw t6, 48(a5)
; RV32IXQCCMP-SR-NEXT: sw a2, 44(a5)
; RV32IXQCCMP-SR-NEXT: sw s1, 40(a5)
; RV32IXQCCMP-SR-NEXT: sw s4, 36(a5)
; RV32IXQCCMP-SR-NEXT: sw s3, 32(a5)
; RV32IXQCCMP-SR-NEXT: sw t5, 28(a5)
; RV32IXQCCMP-SR-NEXT: sw t4, 24(a5)
; RV32IXQCCMP-SR-NEXT: sw t3, 20(a5)
; RV32IXQCCMP-SR-NEXT: sw t2, 16(a5)
; RV32IXQCCMP-SR-NEXT: sw t1, %lo(var0+12)(a0)
; RV32IXQCCMP-SR-NEXT: sw t0, %lo(var0+8)(a0)
; RV32IXQCCMP-SR-NEXT: sw a7, %lo(var0+4)(a0)
; RV32IXQCCMP-SR-NEXT: sw a6, %lo(var0)(a0)
; RV32IXQCCMP-SR-NEXT: qc.cm.popret {ra, s0-s4}, 32
;
; RV64IXQCCMP-SR-LABEL: many_args:
; RV64IXQCCMP-SR: # %bb.0: # %entry
; RV64IXQCCMP-SR-NEXT: qc.cm.push {ra, s0-s4}, -48
; RV64IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 48
; RV64IXQCCMP-SR-NEXT: .cfi_offset s0, -16
; RV64IXQCCMP-SR-NEXT: .cfi_offset s1, -24
; RV64IXQCCMP-SR-NEXT: .cfi_offset s2, -32
; RV64IXQCCMP-SR-NEXT: .cfi_offset s3, -40
; RV64IXQCCMP-SR-NEXT: .cfi_offset s4, -48
; RV64IXQCCMP-SR-NEXT: lui a0, %hi(var0)
; RV64IXQCCMP-SR-NEXT: lw a6, %lo(var0)(a0)
; RV64IXQCCMP-SR-NEXT: lw a7, %lo(var0+4)(a0)
; RV64IXQCCMP-SR-NEXT: lw t0, %lo(var0+8)(a0)
; RV64IXQCCMP-SR-NEXT: lw t1, %lo(var0+12)(a0)
; RV64IXQCCMP-SR-NEXT: addi a5, a0, %lo(var0)
; RV64IXQCCMP-SR-NEXT: lw t2, 16(a5)
; RV64IXQCCMP-SR-NEXT: lw t3, 20(a5)
; RV64IXQCCMP-SR-NEXT: lw t4, 24(a5)
; RV64IXQCCMP-SR-NEXT: lw t5, 28(a5)
; RV64IXQCCMP-SR-NEXT: lw t6, 48(a5)
; RV64IXQCCMP-SR-NEXT: lw s2, 52(a5)
; RV64IXQCCMP-SR-NEXT: lw a3, 56(a5)
; RV64IXQCCMP-SR-NEXT: lw a4, 60(a5)
; RV64IXQCCMP-SR-NEXT: lw a1, 64(a5)
; RV64IXQCCMP-SR-NEXT: lw s0, 68(a5)
; RV64IXQCCMP-SR-NEXT: lw s3, 32(a5)
; RV64IXQCCMP-SR-NEXT: lw s4, 36(a5)
; RV64IXQCCMP-SR-NEXT: lw s1, 40(a5)
; RV64IXQCCMP-SR-NEXT: lw a2, 44(a5)
; RV64IXQCCMP-SR-NEXT: sw s0, 68(a5)
; RV64IXQCCMP-SR-NEXT: sw a1, 64(a5)
; RV64IXQCCMP-SR-NEXT: sw a4, 60(a5)
; RV64IXQCCMP-SR-NEXT: sw a3, 56(a5)
; RV64IXQCCMP-SR-NEXT: sw s2, 52(a5)
; RV64IXQCCMP-SR-NEXT: sw t6, 48(a5)
; RV64IXQCCMP-SR-NEXT: sw a2, 44(a5)
; RV64IXQCCMP-SR-NEXT: sw s1, 40(a5)
; RV64IXQCCMP-SR-NEXT: sw s4, 36(a5)
; RV64IXQCCMP-SR-NEXT: sw s3, 32(a5)
; RV64IXQCCMP-SR-NEXT: sw t5, 28(a5)
; RV64IXQCCMP-SR-NEXT: sw t4, 24(a5)
; RV64IXQCCMP-SR-NEXT: sw t3, 20(a5)
; RV64IXQCCMP-SR-NEXT: sw t2, 16(a5)
; RV64IXQCCMP-SR-NEXT: sw t1, %lo(var0+12)(a0)
; RV64IXQCCMP-SR-NEXT: sw t0, %lo(var0+8)(a0)
; RV64IXQCCMP-SR-NEXT: sw a7, %lo(var0+4)(a0)
; RV64IXQCCMP-SR-NEXT: sw a6, %lo(var0)(a0)
; RV64IXQCCMP-SR-NEXT: qc.cm.popret {ra, s0-s4}, 48
entry:
  ; Load then volatile-store the whole array so all 18 values are live at once.
  %val = load [18 x i32], ptr @var0
  store volatile [18 x i32] %val, ptr @var0
  ret void
}
| |
| ; Check that dynamic allocation calculations remain correct |
| |
| declare ptr @llvm.stacksave() |
| declare void @llvm.stackrestore(ptr) |
| declare void @notdead(ptr) |
| |
; Variable-sized alloca with stacksave/stackrestore: a frame pointer (s0) is
; required, so qc.cm.pushfp is used and sp is recomputed from s0
; (addi sp, s0, -frame_size) before qc.cm.popret so the pop offset is correct.
define void @alloca(i32 %n) {
; RV32IXQCCMP-LABEL: alloca:
; RV32IXQCCMP: # %bb.0:
; RV32IXQCCMP-NEXT: qc.cm.pushfp {ra, s0-s1}, -16
; RV32IXQCCMP-NEXT: .cfi_def_cfa_offset 16
; RV32IXQCCMP-NEXT: .cfi_offset ra, -4
; RV32IXQCCMP-NEXT: .cfi_offset s0, -8
; RV32IXQCCMP-NEXT: .cfi_offset s1, -12
; RV32IXQCCMP-NEXT: .cfi_def_cfa s0, 0
; RV32IXQCCMP-NEXT: mv s1, sp
; RV32IXQCCMP-NEXT: addi a0, a0, 15
; RV32IXQCCMP-NEXT: andi a0, a0, -16
; RV32IXQCCMP-NEXT: sub a0, sp, a0
; RV32IXQCCMP-NEXT: mv sp, a0
; RV32IXQCCMP-NEXT: call notdead
; RV32IXQCCMP-NEXT: mv sp, s1
; RV32IXQCCMP-NEXT: addi sp, s0, -16
; RV32IXQCCMP-NEXT: .cfi_def_cfa sp, 16
; RV32IXQCCMP-NEXT: qc.cm.popret {ra, s0-s1}, 16
;
; RV64IXQCCMP-LABEL: alloca:
; RV64IXQCCMP: # %bb.0:
; RV64IXQCCMP-NEXT: qc.cm.pushfp {ra, s0-s1}, -32
; RV64IXQCCMP-NEXT: .cfi_def_cfa_offset 32
; RV64IXQCCMP-NEXT: .cfi_offset ra, -8
; RV64IXQCCMP-NEXT: .cfi_offset s0, -16
; RV64IXQCCMP-NEXT: .cfi_offset s1, -24
; RV64IXQCCMP-NEXT: .cfi_def_cfa s0, 0
; RV64IXQCCMP-NEXT: mv s1, sp
; RV64IXQCCMP-NEXT: slli a0, a0, 32
; RV64IXQCCMP-NEXT: srli a0, a0, 32
; RV64IXQCCMP-NEXT: addi a0, a0, 15
; RV64IXQCCMP-NEXT: andi a0, a0, -16
; RV64IXQCCMP-NEXT: sub a0, sp, a0
; RV64IXQCCMP-NEXT: mv sp, a0
; RV64IXQCCMP-NEXT: call notdead
; RV64IXQCCMP-NEXT: mv sp, s1
; RV64IXQCCMP-NEXT: addi sp, s0, -32
; RV64IXQCCMP-NEXT: .cfi_def_cfa sp, 32
; RV64IXQCCMP-NEXT: qc.cm.popret {ra, s0-s1}, 32
;
; RV32IXQCCMP-FP-LABEL: alloca:
; RV32IXQCCMP-FP: # %bb.0:
; RV32IXQCCMP-FP-NEXT: qc.cm.pushfp {ra, s0-s1}, -16
; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 16
; RV32IXQCCMP-FP-NEXT: .cfi_offset ra, -4
; RV32IXQCCMP-FP-NEXT: .cfi_offset s0, -8
; RV32IXQCCMP-FP-NEXT: .cfi_offset s1, -12
; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa s0, 0
; RV32IXQCCMP-FP-NEXT: mv s1, sp
; RV32IXQCCMP-FP-NEXT: addi a0, a0, 15
; RV32IXQCCMP-FP-NEXT: andi a0, a0, -16
; RV32IXQCCMP-FP-NEXT: sub a0, sp, a0
; RV32IXQCCMP-FP-NEXT: mv sp, a0
; RV32IXQCCMP-FP-NEXT: call notdead
; RV32IXQCCMP-FP-NEXT: mv sp, s1
; RV32IXQCCMP-FP-NEXT: addi sp, s0, -16
; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa sp, 16
; RV32IXQCCMP-FP-NEXT: qc.cm.popret {ra, s0-s1}, 16
;
; RV64IXQCCMP-FP-LABEL: alloca:
; RV64IXQCCMP-FP: # %bb.0:
; RV64IXQCCMP-FP-NEXT: qc.cm.pushfp {ra, s0-s1}, -32
; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 32
; RV64IXQCCMP-FP-NEXT: .cfi_offset ra, -8
; RV64IXQCCMP-FP-NEXT: .cfi_offset s0, -16
; RV64IXQCCMP-FP-NEXT: .cfi_offset s1, -24
; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa s0, 0
; RV64IXQCCMP-FP-NEXT: mv s1, sp
; RV64IXQCCMP-FP-NEXT: slli a0, a0, 32
; RV64IXQCCMP-FP-NEXT: srli a0, a0, 32
; RV64IXQCCMP-FP-NEXT: addi a0, a0, 15
; RV64IXQCCMP-FP-NEXT: andi a0, a0, -16
; RV64IXQCCMP-FP-NEXT: sub a0, sp, a0
; RV64IXQCCMP-FP-NEXT: mv sp, a0
; RV64IXQCCMP-FP-NEXT: call notdead
; RV64IXQCCMP-FP-NEXT: mv sp, s1
; RV64IXQCCMP-FP-NEXT: addi sp, s0, -32
; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa sp, 32
; RV64IXQCCMP-FP-NEXT: qc.cm.popret {ra, s0-s1}, 32
;
; RV32IXQCCMP-SR-LABEL: alloca:
; RV32IXQCCMP-SR: # %bb.0:
; RV32IXQCCMP-SR-NEXT: qc.cm.pushfp {ra, s0-s1}, -16
; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 16
; RV32IXQCCMP-SR-NEXT: .cfi_offset ra, -4
; RV32IXQCCMP-SR-NEXT: .cfi_offset s0, -8
; RV32IXQCCMP-SR-NEXT: .cfi_offset s1, -12
; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa s0, 0
; RV32IXQCCMP-SR-NEXT: mv s1, sp
; RV32IXQCCMP-SR-NEXT: addi a0, a0, 15
; RV32IXQCCMP-SR-NEXT: andi a0, a0, -16
; RV32IXQCCMP-SR-NEXT: sub a0, sp, a0
; RV32IXQCCMP-SR-NEXT: mv sp, a0
; RV32IXQCCMP-SR-NEXT: call notdead
; RV32IXQCCMP-SR-NEXT: mv sp, s1
; RV32IXQCCMP-SR-NEXT: addi sp, s0, -16
; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa sp, 16
; RV32IXQCCMP-SR-NEXT: qc.cm.popret {ra, s0-s1}, 16
;
; RV64IXQCCMP-SR-LABEL: alloca:
; RV64IXQCCMP-SR: # %bb.0:
; RV64IXQCCMP-SR-NEXT: qc.cm.pushfp {ra, s0-s1}, -32
; RV64IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 32
; RV64IXQCCMP-SR-NEXT: .cfi_offset ra, -8
; RV64IXQCCMP-SR-NEXT: .cfi_offset s0, -16
; RV64IXQCCMP-SR-NEXT: .cfi_offset s1, -24
; RV64IXQCCMP-SR-NEXT: .cfi_def_cfa s0, 0
; RV64IXQCCMP-SR-NEXT: mv s1, sp
; RV64IXQCCMP-SR-NEXT: slli a0, a0, 32
; RV64IXQCCMP-SR-NEXT: srli a0, a0, 32
; RV64IXQCCMP-SR-NEXT: addi a0, a0, 15
; RV64IXQCCMP-SR-NEXT: andi a0, a0, -16
; RV64IXQCCMP-SR-NEXT: sub a0, sp, a0
; RV64IXQCCMP-SR-NEXT: mv sp, a0
; RV64IXQCCMP-SR-NEXT: call notdead
; RV64IXQCCMP-SR-NEXT: mv sp, s1
; RV64IXQCCMP-SR-NEXT: addi sp, s0, -32
; RV64IXQCCMP-SR-NEXT: .cfi_def_cfa sp, 32
; RV64IXQCCMP-SR-NEXT: qc.cm.popret {ra, s0-s1}, 32
  ; Save sp, make a dynamically-sized allocation, hand it to an opaque callee,
  ; then restore sp to release the allocation.
  %sp = call ptr @llvm.stacksave()
  %addr = alloca i8, i32 %n
  call void @notdead(ptr %addr)
  call void @llvm.stackrestore(ptr %sp)
  ret void
}
| |
| declare i32 @foo_test_irq(...) |
| @var_test_irq = global [32 x i32] zeroinitializer |
| |
| ; Machine-mode interrupt handler: in addition to the ra (plus s0 for the -FP |
| ; configurations) saved by qc.cm.push/qc.cm.pushfp, the prologue must spill |
| ; every caller-saved temporary live across the call (t0-t6, a0-a7), reload |
| ; them afterwards, and return with mret instead of qc.cm.popret. |
| ; NOTE(review): the CHECK lines below are autogenerated by |
| ; utils/update_llc_test_checks.py -- regenerate them rather than hand-editing. |
| define void @foo_with_irq() "interrupt"="machine" { |
| ; RV32IXQCCMP-LABEL: foo_with_irq: |
| ; RV32IXQCCMP: # %bb.0: |
| ; RV32IXQCCMP-NEXT: qc.cm.push {ra}, -64 |
| ; RV32IXQCCMP-NEXT: .cfi_def_cfa_offset 64 |
| ; RV32IXQCCMP-NEXT: .cfi_offset ra, -4 |
| ; RV32IXQCCMP-NEXT: addi sp, sp, -16 |
| ; RV32IXQCCMP-NEXT: .cfi_def_cfa_offset 80 |
| ; RV32IXQCCMP-NEXT: sw t0, 60(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-NEXT: sw t1, 56(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-NEXT: sw t2, 52(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-NEXT: sw a0, 48(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-NEXT: sw a1, 44(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-NEXT: sw a2, 40(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-NEXT: sw a3, 36(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-NEXT: sw a4, 32(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-NEXT: sw a5, 28(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-NEXT: sw a6, 24(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-NEXT: sw a7, 20(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-NEXT: sw t3, 16(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-NEXT: sw t4, 12(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-NEXT: sw t5, 8(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-NEXT: sw t6, 4(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-NEXT: .cfi_offset t0, -20 |
| ; RV32IXQCCMP-NEXT: .cfi_offset t1, -24 |
| ; RV32IXQCCMP-NEXT: .cfi_offset t2, -28 |
| ; RV32IXQCCMP-NEXT: .cfi_offset a0, -32 |
| ; RV32IXQCCMP-NEXT: .cfi_offset a1, -36 |
| ; RV32IXQCCMP-NEXT: .cfi_offset a2, -40 |
| ; RV32IXQCCMP-NEXT: .cfi_offset a3, -44 |
| ; RV32IXQCCMP-NEXT: .cfi_offset a4, -48 |
| ; RV32IXQCCMP-NEXT: .cfi_offset a5, -52 |
| ; RV32IXQCCMP-NEXT: .cfi_offset a6, -56 |
| ; RV32IXQCCMP-NEXT: .cfi_offset a7, -60 |
| ; RV32IXQCCMP-NEXT: .cfi_offset t3, -64 |
| ; RV32IXQCCMP-NEXT: .cfi_offset t4, -68 |
| ; RV32IXQCCMP-NEXT: .cfi_offset t5, -72 |
| ; RV32IXQCCMP-NEXT: .cfi_offset t6, -76 |
| ; RV32IXQCCMP-NEXT: call foo_test_irq |
| ; RV32IXQCCMP-NEXT: lw t0, 60(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-NEXT: lw t1, 56(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-NEXT: lw t2, 52(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-NEXT: lw a0, 48(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-NEXT: lw a1, 44(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-NEXT: lw a2, 40(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-NEXT: lw a3, 36(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-NEXT: lw a4, 32(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-NEXT: lw a5, 28(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-NEXT: lw a6, 24(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-NEXT: lw a7, 20(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-NEXT: lw t3, 16(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-NEXT: lw t4, 12(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-NEXT: lw t5, 8(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-NEXT: lw t6, 4(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-NEXT: .cfi_restore t0 |
| ; RV32IXQCCMP-NEXT: .cfi_restore t1 |
| ; RV32IXQCCMP-NEXT: .cfi_restore t2 |
| ; RV32IXQCCMP-NEXT: .cfi_restore a0 |
| ; RV32IXQCCMP-NEXT: .cfi_restore a1 |
| ; RV32IXQCCMP-NEXT: .cfi_restore a2 |
| ; RV32IXQCCMP-NEXT: .cfi_restore a3 |
| ; RV32IXQCCMP-NEXT: .cfi_restore a4 |
| ; RV32IXQCCMP-NEXT: .cfi_restore a5 |
| ; RV32IXQCCMP-NEXT: .cfi_restore a6 |
| ; RV32IXQCCMP-NEXT: .cfi_restore a7 |
| ; RV32IXQCCMP-NEXT: .cfi_restore t3 |
| ; RV32IXQCCMP-NEXT: .cfi_restore t4 |
| ; RV32IXQCCMP-NEXT: .cfi_restore t5 |
| ; RV32IXQCCMP-NEXT: .cfi_restore t6 |
| ; RV32IXQCCMP-NEXT: addi sp, sp, 16 |
| ; RV32IXQCCMP-NEXT: .cfi_def_cfa_offset 64 |
| ; RV32IXQCCMP-NEXT: qc.cm.pop {ra}, 64 |
| ; RV32IXQCCMP-NEXT: .cfi_restore ra |
| ; RV32IXQCCMP-NEXT: .cfi_def_cfa_offset 0 |
| ; RV32IXQCCMP-NEXT: mret |
| ; |
| ; RV64IXQCCMP-LABEL: foo_with_irq: |
| ; RV64IXQCCMP: # %bb.0: |
| ; RV64IXQCCMP-NEXT: qc.cm.push {ra}, -64 |
| ; RV64IXQCCMP-NEXT: .cfi_def_cfa_offset 64 |
| ; RV64IXQCCMP-NEXT: .cfi_offset ra, -8 |
| ; RV64IXQCCMP-NEXT: addi sp, sp, -80 |
| ; RV64IXQCCMP-NEXT: .cfi_def_cfa_offset 144 |
| ; RV64IXQCCMP-NEXT: sd t0, 120(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-NEXT: sd t1, 112(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-NEXT: sd t2, 104(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-NEXT: sd a0, 96(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-NEXT: sd a1, 88(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-NEXT: sd a2, 80(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-NEXT: sd a3, 72(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-NEXT: sd a4, 64(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-NEXT: sd a5, 56(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-NEXT: sd a6, 48(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-NEXT: sd a7, 40(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-NEXT: sd t3, 32(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-NEXT: sd t4, 24(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-NEXT: sd t5, 16(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-NEXT: sd t6, 8(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-NEXT: .cfi_offset t0, -24 |
| ; RV64IXQCCMP-NEXT: .cfi_offset t1, -32 |
| ; RV64IXQCCMP-NEXT: .cfi_offset t2, -40 |
| ; RV64IXQCCMP-NEXT: .cfi_offset a0, -48 |
| ; RV64IXQCCMP-NEXT: .cfi_offset a1, -56 |
| ; RV64IXQCCMP-NEXT: .cfi_offset a2, -64 |
| ; RV64IXQCCMP-NEXT: .cfi_offset a3, -72 |
| ; RV64IXQCCMP-NEXT: .cfi_offset a4, -80 |
| ; RV64IXQCCMP-NEXT: .cfi_offset a5, -88 |
| ; RV64IXQCCMP-NEXT: .cfi_offset a6, -96 |
| ; RV64IXQCCMP-NEXT: .cfi_offset a7, -104 |
| ; RV64IXQCCMP-NEXT: .cfi_offset t3, -112 |
| ; RV64IXQCCMP-NEXT: .cfi_offset t4, -120 |
| ; RV64IXQCCMP-NEXT: .cfi_offset t5, -128 |
| ; RV64IXQCCMP-NEXT: .cfi_offset t6, -136 |
| ; RV64IXQCCMP-NEXT: call foo_test_irq |
| ; RV64IXQCCMP-NEXT: ld t0, 120(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-NEXT: ld t1, 112(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-NEXT: ld t2, 104(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-NEXT: ld a0, 96(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-NEXT: ld a1, 88(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-NEXT: ld a2, 80(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-NEXT: ld a3, 72(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-NEXT: ld a4, 64(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-NEXT: ld a5, 56(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-NEXT: ld a6, 48(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-NEXT: ld a7, 40(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-NEXT: ld t3, 32(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-NEXT: ld t4, 24(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-NEXT: ld t5, 16(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-NEXT: ld t6, 8(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-NEXT: .cfi_restore t0 |
| ; RV64IXQCCMP-NEXT: .cfi_restore t1 |
| ; RV64IXQCCMP-NEXT: .cfi_restore t2 |
| ; RV64IXQCCMP-NEXT: .cfi_restore a0 |
| ; RV64IXQCCMP-NEXT: .cfi_restore a1 |
| ; RV64IXQCCMP-NEXT: .cfi_restore a2 |
| ; RV64IXQCCMP-NEXT: .cfi_restore a3 |
| ; RV64IXQCCMP-NEXT: .cfi_restore a4 |
| ; RV64IXQCCMP-NEXT: .cfi_restore a5 |
| ; RV64IXQCCMP-NEXT: .cfi_restore a6 |
| ; RV64IXQCCMP-NEXT: .cfi_restore a7 |
| ; RV64IXQCCMP-NEXT: .cfi_restore t3 |
| ; RV64IXQCCMP-NEXT: .cfi_restore t4 |
| ; RV64IXQCCMP-NEXT: .cfi_restore t5 |
| ; RV64IXQCCMP-NEXT: .cfi_restore t6 |
| ; RV64IXQCCMP-NEXT: addi sp, sp, 80 |
| ; RV64IXQCCMP-NEXT: .cfi_def_cfa_offset 64 |
| ; RV64IXQCCMP-NEXT: qc.cm.pop {ra}, 64 |
| ; RV64IXQCCMP-NEXT: .cfi_restore ra |
| ; RV64IXQCCMP-NEXT: .cfi_def_cfa_offset 0 |
| ; RV64IXQCCMP-NEXT: mret |
| ; |
| ; RV32IXQCCMP-FP-LABEL: foo_with_irq: |
| ; RV32IXQCCMP-FP: # %bb.0: |
| ; RV32IXQCCMP-FP-NEXT: qc.cm.pushfp {ra, s0}, -64 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 64 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset ra, -4 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset s0, -8 |
| ; RV32IXQCCMP-FP-NEXT: addi sp, sp, -16 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 80 |
| ; RV32IXQCCMP-FP-NEXT: sw t0, 60(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-FP-NEXT: sw t1, 56(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-FP-NEXT: sw t2, 52(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-FP-NEXT: sw a0, 48(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-FP-NEXT: sw a1, 44(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-FP-NEXT: sw a2, 40(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-FP-NEXT: sw a3, 36(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-FP-NEXT: sw a4, 32(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-FP-NEXT: sw a5, 28(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-FP-NEXT: sw a6, 24(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-FP-NEXT: sw a7, 20(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-FP-NEXT: sw t3, 16(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-FP-NEXT: sw t4, 12(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-FP-NEXT: sw t5, 8(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-FP-NEXT: sw t6, 4(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset t0, -20 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset t1, -24 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset t2, -28 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset a0, -32 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset a1, -36 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset a2, -40 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset a3, -44 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset a4, -48 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset a5, -52 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset a6, -56 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset a7, -60 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset t3, -64 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset t4, -68 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset t5, -72 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset t6, -76 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa s0, 0 |
| ; RV32IXQCCMP-FP-NEXT: call foo_test_irq |
| ; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa sp, 80 |
| ; RV32IXQCCMP-FP-NEXT: lw t0, 60(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-FP-NEXT: lw t1, 56(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-FP-NEXT: lw t2, 52(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-FP-NEXT: lw a0, 48(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-FP-NEXT: lw a1, 44(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-FP-NEXT: lw a2, 40(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-FP-NEXT: lw a3, 36(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-FP-NEXT: lw a4, 32(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-FP-NEXT: lw a5, 28(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-FP-NEXT: lw a6, 24(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-FP-NEXT: lw a7, 20(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-FP-NEXT: lw t3, 16(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-FP-NEXT: lw t4, 12(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-FP-NEXT: lw t5, 8(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-FP-NEXT: lw t6, 4(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-FP-NEXT: .cfi_restore t0 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_restore t1 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_restore t2 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_restore a0 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_restore a1 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_restore a2 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_restore a3 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_restore a4 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_restore a5 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_restore a6 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_restore a7 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_restore t3 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_restore t4 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_restore t5 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_restore t6 |
| ; RV32IXQCCMP-FP-NEXT: addi sp, sp, 16 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 64 |
| ; RV32IXQCCMP-FP-NEXT: qc.cm.pop {ra, s0}, 64 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_restore ra |
| ; RV32IXQCCMP-FP-NEXT: .cfi_restore s0 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 0 |
| ; RV32IXQCCMP-FP-NEXT: mret |
| ; |
| ; RV64IXQCCMP-FP-LABEL: foo_with_irq: |
| ; RV64IXQCCMP-FP: # %bb.0: |
| ; RV64IXQCCMP-FP-NEXT: qc.cm.pushfp {ra, s0}, -64 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 64 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset ra, -8 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset s0, -16 |
| ; RV64IXQCCMP-FP-NEXT: addi sp, sp, -80 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 144 |
| ; RV64IXQCCMP-FP-NEXT: sd t0, 120(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-FP-NEXT: sd t1, 112(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-FP-NEXT: sd t2, 104(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-FP-NEXT: sd a0, 96(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-FP-NEXT: sd a1, 88(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-FP-NEXT: sd a2, 80(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-FP-NEXT: sd a3, 72(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-FP-NEXT: sd a4, 64(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-FP-NEXT: sd a5, 56(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-FP-NEXT: sd a6, 48(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-FP-NEXT: sd a7, 40(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-FP-NEXT: sd t3, 32(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-FP-NEXT: sd t4, 24(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-FP-NEXT: sd t5, 16(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-FP-NEXT: sd t6, 8(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset t0, -24 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset t1, -32 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset t2, -40 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset a0, -48 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset a1, -56 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset a2, -64 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset a3, -72 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset a4, -80 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset a5, -88 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset a6, -96 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset a7, -104 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset t3, -112 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset t4, -120 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset t5, -128 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset t6, -136 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa s0, 0 |
| ; RV64IXQCCMP-FP-NEXT: call foo_test_irq |
| ; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa sp, 144 |
| ; RV64IXQCCMP-FP-NEXT: ld t0, 120(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-FP-NEXT: ld t1, 112(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-FP-NEXT: ld t2, 104(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-FP-NEXT: ld a0, 96(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-FP-NEXT: ld a1, 88(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-FP-NEXT: ld a2, 80(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-FP-NEXT: ld a3, 72(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-FP-NEXT: ld a4, 64(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-FP-NEXT: ld a5, 56(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-FP-NEXT: ld a6, 48(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-FP-NEXT: ld a7, 40(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-FP-NEXT: ld t3, 32(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-FP-NEXT: ld t4, 24(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-FP-NEXT: ld t5, 16(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-FP-NEXT: ld t6, 8(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-FP-NEXT: .cfi_restore t0 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_restore t1 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_restore t2 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_restore a0 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_restore a1 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_restore a2 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_restore a3 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_restore a4 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_restore a5 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_restore a6 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_restore a7 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_restore t3 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_restore t4 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_restore t5 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_restore t6 |
| ; RV64IXQCCMP-FP-NEXT: addi sp, sp, 80 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 64 |
| ; RV64IXQCCMP-FP-NEXT: qc.cm.pop {ra, s0}, 64 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_restore ra |
| ; RV64IXQCCMP-FP-NEXT: .cfi_restore s0 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 0 |
| ; RV64IXQCCMP-FP-NEXT: mret |
| ; |
| ; RV32IXQCCMP-SR-LABEL: foo_with_irq: |
| ; RV32IXQCCMP-SR: # %bb.0: |
| ; RV32IXQCCMP-SR-NEXT: qc.cm.push {ra}, -64 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 64 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset ra, -4 |
| ; RV32IXQCCMP-SR-NEXT: addi sp, sp, -16 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 80 |
| ; RV32IXQCCMP-SR-NEXT: sw t0, 60(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-SR-NEXT: sw t1, 56(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-SR-NEXT: sw t2, 52(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-SR-NEXT: sw a0, 48(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-SR-NEXT: sw a1, 44(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-SR-NEXT: sw a2, 40(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-SR-NEXT: sw a3, 36(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-SR-NEXT: sw a4, 32(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-SR-NEXT: sw a5, 28(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-SR-NEXT: sw a6, 24(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-SR-NEXT: sw a7, 20(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-SR-NEXT: sw t3, 16(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-SR-NEXT: sw t4, 12(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-SR-NEXT: sw t5, 8(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-SR-NEXT: sw t6, 4(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset t0, -20 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset t1, -24 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset t2, -28 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset a0, -32 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset a1, -36 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset a2, -40 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset a3, -44 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset a4, -48 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset a5, -52 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset a6, -56 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset a7, -60 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset t3, -64 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset t4, -68 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset t5, -72 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset t6, -76 |
| ; RV32IXQCCMP-SR-NEXT: call foo_test_irq |
| ; RV32IXQCCMP-SR-NEXT: lw t0, 60(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-SR-NEXT: lw t1, 56(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-SR-NEXT: lw t2, 52(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-SR-NEXT: lw a0, 48(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-SR-NEXT: lw a1, 44(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-SR-NEXT: lw a2, 40(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-SR-NEXT: lw a3, 36(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-SR-NEXT: lw a4, 32(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-SR-NEXT: lw a5, 28(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-SR-NEXT: lw a6, 24(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-SR-NEXT: lw a7, 20(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-SR-NEXT: lw t3, 16(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-SR-NEXT: lw t4, 12(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-SR-NEXT: lw t5, 8(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-SR-NEXT: lw t6, 4(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-SR-NEXT: .cfi_restore t0 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_restore t1 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_restore t2 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_restore a0 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_restore a1 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_restore a2 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_restore a3 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_restore a4 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_restore a5 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_restore a6 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_restore a7 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_restore t3 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_restore t4 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_restore t5 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_restore t6 |
| ; RV32IXQCCMP-SR-NEXT: addi sp, sp, 16 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 64 |
| ; RV32IXQCCMP-SR-NEXT: qc.cm.pop {ra}, 64 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_restore ra |
| ; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 0 |
| ; RV32IXQCCMP-SR-NEXT: mret |
| ; |
| ; RV64IXQCCMP-SR-LABEL: foo_with_irq: |
| ; RV64IXQCCMP-SR: # %bb.0: |
| ; RV64IXQCCMP-SR-NEXT: qc.cm.push {ra}, -64 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 64 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset ra, -8 |
| ; RV64IXQCCMP-SR-NEXT: addi sp, sp, -80 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 144 |
| ; RV64IXQCCMP-SR-NEXT: sd t0, 120(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-SR-NEXT: sd t1, 112(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-SR-NEXT: sd t2, 104(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-SR-NEXT: sd a0, 96(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-SR-NEXT: sd a1, 88(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-SR-NEXT: sd a2, 80(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-SR-NEXT: sd a3, 72(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-SR-NEXT: sd a4, 64(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-SR-NEXT: sd a5, 56(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-SR-NEXT: sd a6, 48(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-SR-NEXT: sd a7, 40(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-SR-NEXT: sd t3, 32(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-SR-NEXT: sd t4, 24(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-SR-NEXT: sd t5, 16(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-SR-NEXT: sd t6, 8(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset t0, -24 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset t1, -32 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset t2, -40 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset a0, -48 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset a1, -56 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset a2, -64 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset a3, -72 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset a4, -80 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset a5, -88 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset a6, -96 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset a7, -104 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset t3, -112 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset t4, -120 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset t5, -128 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset t6, -136 |
| ; RV64IXQCCMP-SR-NEXT: call foo_test_irq |
| ; RV64IXQCCMP-SR-NEXT: ld t0, 120(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-SR-NEXT: ld t1, 112(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-SR-NEXT: ld t2, 104(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-SR-NEXT: ld a0, 96(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-SR-NEXT: ld a1, 88(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-SR-NEXT: ld a2, 80(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-SR-NEXT: ld a3, 72(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-SR-NEXT: ld a4, 64(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-SR-NEXT: ld a5, 56(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-SR-NEXT: ld a6, 48(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-SR-NEXT: ld a7, 40(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-SR-NEXT: ld t3, 32(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-SR-NEXT: ld t4, 24(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-SR-NEXT: ld t5, 16(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-SR-NEXT: ld t6, 8(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-SR-NEXT: .cfi_restore t0 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_restore t1 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_restore t2 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_restore a0 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_restore a1 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_restore a2 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_restore a3 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_restore a4 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_restore a5 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_restore a6 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_restore a7 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_restore t3 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_restore t4 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_restore t5 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_restore t6 |
| ; RV64IXQCCMP-SR-NEXT: addi sp, sp, 80 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 64 |
| ; RV64IXQCCMP-SR-NEXT: qc.cm.pop {ra}, 64 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_restore ra |
| ; RV64IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 0 |
| ; RV64IXQCCMP-SR-NEXT: mret |
| ; The call result is unused; the call exists only to force the handler to |
| ; preserve the volatile registers shown above. |
| %call = call i32 @foo_test_irq() |
| ret void |
| } |
| |
| ; Baseline for foo_with_irq: the same call without the "interrupt" attribute. |
| ; Only ra (plus s0 for the -FP configurations) is saved via |
| ; qc.cm.push/qc.cm.pushfp, no temporaries are spilled, and the function |
| ; returns through qc.cm.popret rather than mret. |
| ; NOTE(review): the CHECK lines below are autogenerated by |
| ; utils/update_llc_test_checks.py -- regenerate them rather than hand-editing. |
| define void @foo_no_irq() { |
| ; RV32IXQCCMP-LABEL: foo_no_irq: |
| ; RV32IXQCCMP: # %bb.0: |
| ; RV32IXQCCMP-NEXT: qc.cm.push {ra}, -16 |
| ; RV32IXQCCMP-NEXT: .cfi_def_cfa_offset 16 |
| ; RV32IXQCCMP-NEXT: .cfi_offset ra, -4 |
| ; RV32IXQCCMP-NEXT: call foo_test_irq |
| ; RV32IXQCCMP-NEXT: qc.cm.popret {ra}, 16 |
| ; |
| ; RV64IXQCCMP-LABEL: foo_no_irq: |
| ; RV64IXQCCMP: # %bb.0: |
| ; RV64IXQCCMP-NEXT: qc.cm.push {ra}, -16 |
| ; RV64IXQCCMP-NEXT: .cfi_def_cfa_offset 16 |
| ; RV64IXQCCMP-NEXT: .cfi_offset ra, -8 |
| ; RV64IXQCCMP-NEXT: call foo_test_irq |
| ; RV64IXQCCMP-NEXT: qc.cm.popret {ra}, 16 |
| ; |
| ; RV32IXQCCMP-FP-LABEL: foo_no_irq: |
| ; RV32IXQCCMP-FP: # %bb.0: |
| ; RV32IXQCCMP-FP-NEXT: qc.cm.pushfp {ra, s0}, -16 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 16 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset ra, -4 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset s0, -8 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa s0, 0 |
| ; RV32IXQCCMP-FP-NEXT: call foo_test_irq |
| ; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa sp, 16 |
| ; RV32IXQCCMP-FP-NEXT: qc.cm.popret {ra, s0}, 16 |
| ; |
| ; RV64IXQCCMP-FP-LABEL: foo_no_irq: |
| ; RV64IXQCCMP-FP: # %bb.0: |
| ; RV64IXQCCMP-FP-NEXT: qc.cm.pushfp {ra, s0}, -16 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 16 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset ra, -8 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset s0, -16 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa s0, 0 |
| ; RV64IXQCCMP-FP-NEXT: call foo_test_irq |
| ; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa sp, 16 |
| ; RV64IXQCCMP-FP-NEXT: qc.cm.popret {ra, s0}, 16 |
| ; |
| ; RV32IXQCCMP-SR-LABEL: foo_no_irq: |
| ; RV32IXQCCMP-SR: # %bb.0: |
| ; RV32IXQCCMP-SR-NEXT: qc.cm.push {ra}, -16 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 16 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset ra, -4 |
| ; RV32IXQCCMP-SR-NEXT: call foo_test_irq |
| ; RV32IXQCCMP-SR-NEXT: qc.cm.popret {ra}, 16 |
| ; |
| ; RV64IXQCCMP-SR-LABEL: foo_no_irq: |
| ; RV64IXQCCMP-SR: # %bb.0: |
| ; RV64IXQCCMP-SR-NEXT: qc.cm.push {ra}, -16 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 16 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset ra, -8 |
| ; RV64IXQCCMP-SR-NEXT: call foo_test_irq |
| ; RV64IXQCCMP-SR-NEXT: qc.cm.popret {ra}, 16 |
| ; The call result is unused; only the call's register-preservation effect |
| ; on the prologue/epilogue is under test. |
| %call = call i32 @foo_test_irq() |
| ret void |
| } |
| |
| define void @callee_with_irq() "interrupt"="machine" { |
| ; RV32IXQCCMP-LABEL: callee_with_irq: |
| ; RV32IXQCCMP: # %bb.0: |
| ; RV32IXQCCMP-NEXT: qc.cm.push {ra, s0-s11}, -112 |
| ; RV32IXQCCMP-NEXT: .cfi_def_cfa_offset 112 |
| ; RV32IXQCCMP-NEXT: .cfi_offset ra, -4 |
| ; RV32IXQCCMP-NEXT: .cfi_offset s0, -8 |
| ; RV32IXQCCMP-NEXT: .cfi_offset s1, -12 |
| ; RV32IXQCCMP-NEXT: .cfi_offset s2, -16 |
| ; RV32IXQCCMP-NEXT: .cfi_offset s3, -20 |
| ; RV32IXQCCMP-NEXT: .cfi_offset s4, -24 |
| ; RV32IXQCCMP-NEXT: .cfi_offset s5, -28 |
| ; RV32IXQCCMP-NEXT: .cfi_offset s6, -32 |
| ; RV32IXQCCMP-NEXT: .cfi_offset s7, -36 |
| ; RV32IXQCCMP-NEXT: .cfi_offset s8, -40 |
| ; RV32IXQCCMP-NEXT: .cfi_offset s9, -44 |
| ; RV32IXQCCMP-NEXT: .cfi_offset s10, -48 |
| ; RV32IXQCCMP-NEXT: .cfi_offset s11, -52 |
| ; RV32IXQCCMP-NEXT: addi sp, sp, -48 |
| ; RV32IXQCCMP-NEXT: .cfi_def_cfa_offset 160 |
| ; RV32IXQCCMP-NEXT: sw t0, 92(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-NEXT: sw t1, 88(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-NEXT: sw t2, 84(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-NEXT: sw a0, 80(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-NEXT: sw a1, 76(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-NEXT: sw a2, 72(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-NEXT: sw a3, 68(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-NEXT: sw a4, 64(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-NEXT: sw a5, 60(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-NEXT: sw a6, 56(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-NEXT: sw a7, 52(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-NEXT: sw t3, 48(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-NEXT: sw t4, 44(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-NEXT: sw t5, 40(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-NEXT: sw t6, 36(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-NEXT: .cfi_offset t0, -68 |
| ; RV32IXQCCMP-NEXT: .cfi_offset t1, -72 |
| ; RV32IXQCCMP-NEXT: .cfi_offset t2, -76 |
| ; RV32IXQCCMP-NEXT: .cfi_offset a0, -80 |
| ; RV32IXQCCMP-NEXT: .cfi_offset a1, -84 |
| ; RV32IXQCCMP-NEXT: .cfi_offset a2, -88 |
| ; RV32IXQCCMP-NEXT: .cfi_offset a3, -92 |
| ; RV32IXQCCMP-NEXT: .cfi_offset a4, -96 |
| ; RV32IXQCCMP-NEXT: .cfi_offset a5, -100 |
| ; RV32IXQCCMP-NEXT: .cfi_offset a6, -104 |
| ; RV32IXQCCMP-NEXT: .cfi_offset a7, -108 |
| ; RV32IXQCCMP-NEXT: .cfi_offset t3, -112 |
| ; RV32IXQCCMP-NEXT: .cfi_offset t4, -116 |
| ; RV32IXQCCMP-NEXT: .cfi_offset t5, -120 |
| ; RV32IXQCCMP-NEXT: .cfi_offset t6, -124 |
| ; RV32IXQCCMP-NEXT: lui t0, %hi(var_test_irq) |
| ; RV32IXQCCMP-NEXT: lw a0, %lo(var_test_irq)(t0) |
| ; RV32IXQCCMP-NEXT: sw a0, 32(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-NEXT: lw a0, %lo(var_test_irq+4)(t0) |
| ; RV32IXQCCMP-NEXT: sw a0, 28(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-NEXT: lw a0, %lo(var_test_irq+8)(t0) |
| ; RV32IXQCCMP-NEXT: sw a0, 24(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-NEXT: lw a0, %lo(var_test_irq+12)(t0) |
| ; RV32IXQCCMP-NEXT: sw a0, 20(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-NEXT: addi a5, t0, %lo(var_test_irq) |
| ; RV32IXQCCMP-NEXT: lw a0, 16(a5) |
| ; RV32IXQCCMP-NEXT: sw a0, 16(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-NEXT: lw a0, 20(a5) |
| ; RV32IXQCCMP-NEXT: sw a0, 12(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-NEXT: lw t4, 24(a5) |
| ; RV32IXQCCMP-NEXT: lw t5, 28(a5) |
| ; RV32IXQCCMP-NEXT: lw t6, 32(a5) |
| ; RV32IXQCCMP-NEXT: lw s2, 36(a5) |
| ; RV32IXQCCMP-NEXT: lw s3, 40(a5) |
| ; RV32IXQCCMP-NEXT: lw s4, 44(a5) |
| ; RV32IXQCCMP-NEXT: lw s5, 48(a5) |
| ; RV32IXQCCMP-NEXT: lw s6, 52(a5) |
| ; RV32IXQCCMP-NEXT: lw s7, 56(a5) |
| ; RV32IXQCCMP-NEXT: lw s8, 60(a5) |
| ; RV32IXQCCMP-NEXT: lw s9, 64(a5) |
| ; RV32IXQCCMP-NEXT: lw s10, 68(a5) |
| ; RV32IXQCCMP-NEXT: lw s11, 72(a5) |
| ; RV32IXQCCMP-NEXT: lw ra, 76(a5) |
| ; RV32IXQCCMP-NEXT: lw s1, 80(a5) |
| ; RV32IXQCCMP-NEXT: lw t3, 84(a5) |
| ; RV32IXQCCMP-NEXT: lw t2, 88(a5) |
| ; RV32IXQCCMP-NEXT: lw t1, 92(a5) |
| ; RV32IXQCCMP-NEXT: lw a7, 112(a5) |
| ; RV32IXQCCMP-NEXT: lw s0, 116(a5) |
| ; RV32IXQCCMP-NEXT: lw a3, 120(a5) |
| ; RV32IXQCCMP-NEXT: lw a0, 124(a5) |
| ; RV32IXQCCMP-NEXT: lw a6, 96(a5) |
| ; RV32IXQCCMP-NEXT: lw a4, 100(a5) |
| ; RV32IXQCCMP-NEXT: lw a2, 104(a5) |
| ; RV32IXQCCMP-NEXT: lw a1, 108(a5) |
| ; RV32IXQCCMP-NEXT: sw a0, 124(a5) |
| ; RV32IXQCCMP-NEXT: sw a3, 120(a5) |
| ; RV32IXQCCMP-NEXT: sw s0, 116(a5) |
| ; RV32IXQCCMP-NEXT: sw a7, 112(a5) |
| ; RV32IXQCCMP-NEXT: sw a1, 108(a5) |
| ; RV32IXQCCMP-NEXT: sw a2, 104(a5) |
| ; RV32IXQCCMP-NEXT: sw a4, 100(a5) |
| ; RV32IXQCCMP-NEXT: sw a6, 96(a5) |
| ; RV32IXQCCMP-NEXT: sw t1, 92(a5) |
| ; RV32IXQCCMP-NEXT: sw t2, 88(a5) |
| ; RV32IXQCCMP-NEXT: sw t3, 84(a5) |
| ; RV32IXQCCMP-NEXT: sw s1, 80(a5) |
| ; RV32IXQCCMP-NEXT: sw ra, 76(a5) |
| ; RV32IXQCCMP-NEXT: sw s11, 72(a5) |
| ; RV32IXQCCMP-NEXT: sw s10, 68(a5) |
| ; RV32IXQCCMP-NEXT: sw s9, 64(a5) |
| ; RV32IXQCCMP-NEXT: sw s8, 60(a5) |
| ; RV32IXQCCMP-NEXT: sw s7, 56(a5) |
| ; RV32IXQCCMP-NEXT: sw s6, 52(a5) |
| ; RV32IXQCCMP-NEXT: sw s5, 48(a5) |
| ; RV32IXQCCMP-NEXT: sw s4, 44(a5) |
| ; RV32IXQCCMP-NEXT: sw s3, 40(a5) |
| ; RV32IXQCCMP-NEXT: sw s2, 36(a5) |
| ; RV32IXQCCMP-NEXT: sw t6, 32(a5) |
| ; RV32IXQCCMP-NEXT: sw t5, 28(a5) |
| ; RV32IXQCCMP-NEXT: sw t4, 24(a5) |
| ; RV32IXQCCMP-NEXT: lw a0, 12(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-NEXT: sw a0, 20(a5) |
| ; RV32IXQCCMP-NEXT: lw a0, 16(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-NEXT: sw a0, 16(a5) |
| ; RV32IXQCCMP-NEXT: lw a0, 20(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-NEXT: sw a0, %lo(var_test_irq+12)(t0) |
| ; RV32IXQCCMP-NEXT: lw a0, 24(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-NEXT: sw a0, %lo(var_test_irq+8)(t0) |
| ; RV32IXQCCMP-NEXT: lw a0, 28(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-NEXT: sw a0, %lo(var_test_irq+4)(t0) |
| ; RV32IXQCCMP-NEXT: lw a0, 32(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-NEXT: sw a0, %lo(var_test_irq)(t0) |
| ; RV32IXQCCMP-NEXT: lw t0, 92(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-NEXT: lw t1, 88(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-NEXT: lw t2, 84(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-NEXT: lw a0, 80(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-NEXT: lw a1, 76(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-NEXT: lw a2, 72(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-NEXT: lw a3, 68(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-NEXT: lw a4, 64(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-NEXT: lw a5, 60(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-NEXT: lw a6, 56(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-NEXT: lw a7, 52(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-NEXT: lw t3, 48(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-NEXT: lw t4, 44(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-NEXT: lw t5, 40(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-NEXT: lw t6, 36(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-NEXT: .cfi_restore t0 |
| ; RV32IXQCCMP-NEXT: .cfi_restore t1 |
| ; RV32IXQCCMP-NEXT: .cfi_restore t2 |
| ; RV32IXQCCMP-NEXT: .cfi_restore a0 |
| ; RV32IXQCCMP-NEXT: .cfi_restore a1 |
| ; RV32IXQCCMP-NEXT: .cfi_restore a2 |
| ; RV32IXQCCMP-NEXT: .cfi_restore a3 |
| ; RV32IXQCCMP-NEXT: .cfi_restore a4 |
| ; RV32IXQCCMP-NEXT: .cfi_restore a5 |
| ; RV32IXQCCMP-NEXT: .cfi_restore a6 |
| ; RV32IXQCCMP-NEXT: .cfi_restore a7 |
| ; RV32IXQCCMP-NEXT: .cfi_restore t3 |
| ; RV32IXQCCMP-NEXT: .cfi_restore t4 |
| ; RV32IXQCCMP-NEXT: .cfi_restore t5 |
| ; RV32IXQCCMP-NEXT: .cfi_restore t6 |
| ; RV32IXQCCMP-NEXT: addi sp, sp, 48 |
| ; RV32IXQCCMP-NEXT: .cfi_def_cfa_offset 112 |
| ; RV32IXQCCMP-NEXT: qc.cm.pop {ra, s0-s11}, 112 |
| ; RV32IXQCCMP-NEXT: .cfi_restore ra |
| ; RV32IXQCCMP-NEXT: .cfi_restore s0 |
| ; RV32IXQCCMP-NEXT: .cfi_restore s1 |
| ; RV32IXQCCMP-NEXT: .cfi_restore s2 |
| ; RV32IXQCCMP-NEXT: .cfi_restore s3 |
| ; RV32IXQCCMP-NEXT: .cfi_restore s4 |
| ; RV32IXQCCMP-NEXT: .cfi_restore s5 |
| ; RV32IXQCCMP-NEXT: .cfi_restore s6 |
| ; RV32IXQCCMP-NEXT: .cfi_restore s7 |
| ; RV32IXQCCMP-NEXT: .cfi_restore s8 |
| ; RV32IXQCCMP-NEXT: .cfi_restore s9 |
| ; RV32IXQCCMP-NEXT: .cfi_restore s10 |
| ; RV32IXQCCMP-NEXT: .cfi_restore s11 |
| ; RV32IXQCCMP-NEXT: .cfi_def_cfa_offset 0 |
| ; RV32IXQCCMP-NEXT: mret |
| ; |
| ; RV64IXQCCMP-LABEL: callee_with_irq: |
| ; RV64IXQCCMP: # %bb.0: |
| ; RV64IXQCCMP-NEXT: qc.cm.push {ra, s0-s11}, -160 |
| ; RV64IXQCCMP-NEXT: .cfi_def_cfa_offset 160 |
| ; RV64IXQCCMP-NEXT: .cfi_offset ra, -8 |
| ; RV64IXQCCMP-NEXT: .cfi_offset s0, -16 |
| ; RV64IXQCCMP-NEXT: .cfi_offset s1, -24 |
| ; RV64IXQCCMP-NEXT: .cfi_offset s2, -32 |
| ; RV64IXQCCMP-NEXT: .cfi_offset s3, -40 |
| ; RV64IXQCCMP-NEXT: .cfi_offset s4, -48 |
| ; RV64IXQCCMP-NEXT: .cfi_offset s5, -56 |
| ; RV64IXQCCMP-NEXT: .cfi_offset s6, -64 |
| ; RV64IXQCCMP-NEXT: .cfi_offset s7, -72 |
| ; RV64IXQCCMP-NEXT: .cfi_offset s8, -80 |
| ; RV64IXQCCMP-NEXT: .cfi_offset s9, -88 |
| ; RV64IXQCCMP-NEXT: .cfi_offset s10, -96 |
| ; RV64IXQCCMP-NEXT: .cfi_offset s11, -104 |
| ; RV64IXQCCMP-NEXT: addi sp, sp, -128 |
| ; RV64IXQCCMP-NEXT: .cfi_def_cfa_offset 288 |
| ; RV64IXQCCMP-NEXT: sd t0, 168(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-NEXT: sd t1, 160(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-NEXT: sd t2, 152(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-NEXT: sd a0, 144(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-NEXT: sd a1, 136(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-NEXT: sd a2, 128(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-NEXT: sd a3, 120(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-NEXT: sd a4, 112(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-NEXT: sd a5, 104(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-NEXT: sd a6, 96(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-NEXT: sd a7, 88(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-NEXT: sd t3, 80(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-NEXT: sd t4, 72(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-NEXT: sd t5, 64(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-NEXT: sd t6, 56(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-NEXT: .cfi_offset t0, -120 |
| ; RV64IXQCCMP-NEXT: .cfi_offset t1, -128 |
| ; RV64IXQCCMP-NEXT: .cfi_offset t2, -136 |
| ; RV64IXQCCMP-NEXT: .cfi_offset a0, -144 |
| ; RV64IXQCCMP-NEXT: .cfi_offset a1, -152 |
| ; RV64IXQCCMP-NEXT: .cfi_offset a2, -160 |
| ; RV64IXQCCMP-NEXT: .cfi_offset a3, -168 |
| ; RV64IXQCCMP-NEXT: .cfi_offset a4, -176 |
| ; RV64IXQCCMP-NEXT: .cfi_offset a5, -184 |
| ; RV64IXQCCMP-NEXT: .cfi_offset a6, -192 |
| ; RV64IXQCCMP-NEXT: .cfi_offset a7, -200 |
| ; RV64IXQCCMP-NEXT: .cfi_offset t3, -208 |
| ; RV64IXQCCMP-NEXT: .cfi_offset t4, -216 |
| ; RV64IXQCCMP-NEXT: .cfi_offset t5, -224 |
| ; RV64IXQCCMP-NEXT: .cfi_offset t6, -232 |
| ; RV64IXQCCMP-NEXT: lui t0, %hi(var_test_irq) |
| ; RV64IXQCCMP-NEXT: lw a0, %lo(var_test_irq)(t0) |
| ; RV64IXQCCMP-NEXT: sd a0, 48(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-NEXT: lw a0, %lo(var_test_irq+4)(t0) |
| ; RV64IXQCCMP-NEXT: sd a0, 40(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-NEXT: lw a0, %lo(var_test_irq+8)(t0) |
| ; RV64IXQCCMP-NEXT: sd a0, 32(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-NEXT: lw a0, %lo(var_test_irq+12)(t0) |
| ; RV64IXQCCMP-NEXT: sd a0, 24(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-NEXT: addi a5, t0, %lo(var_test_irq) |
| ; RV64IXQCCMP-NEXT: lw a0, 16(a5) |
| ; RV64IXQCCMP-NEXT: sd a0, 16(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-NEXT: lw a0, 20(a5) |
| ; RV64IXQCCMP-NEXT: sd a0, 8(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-NEXT: lw t4, 24(a5) |
| ; RV64IXQCCMP-NEXT: lw t5, 28(a5) |
| ; RV64IXQCCMP-NEXT: lw t6, 32(a5) |
| ; RV64IXQCCMP-NEXT: lw s2, 36(a5) |
| ; RV64IXQCCMP-NEXT: lw s3, 40(a5) |
| ; RV64IXQCCMP-NEXT: lw s4, 44(a5) |
| ; RV64IXQCCMP-NEXT: lw s5, 48(a5) |
| ; RV64IXQCCMP-NEXT: lw s6, 52(a5) |
| ; RV64IXQCCMP-NEXT: lw s7, 56(a5) |
| ; RV64IXQCCMP-NEXT: lw s8, 60(a5) |
| ; RV64IXQCCMP-NEXT: lw s9, 64(a5) |
| ; RV64IXQCCMP-NEXT: lw s10, 68(a5) |
| ; RV64IXQCCMP-NEXT: lw s11, 72(a5) |
| ; RV64IXQCCMP-NEXT: lw ra, 76(a5) |
| ; RV64IXQCCMP-NEXT: lw s1, 80(a5) |
| ; RV64IXQCCMP-NEXT: lw t3, 84(a5) |
| ; RV64IXQCCMP-NEXT: lw t2, 88(a5) |
| ; RV64IXQCCMP-NEXT: lw t1, 92(a5) |
| ; RV64IXQCCMP-NEXT: lw a7, 112(a5) |
| ; RV64IXQCCMP-NEXT: lw s0, 116(a5) |
| ; RV64IXQCCMP-NEXT: lw a3, 120(a5) |
| ; RV64IXQCCMP-NEXT: lw a0, 124(a5) |
| ; RV64IXQCCMP-NEXT: lw a6, 96(a5) |
| ; RV64IXQCCMP-NEXT: lw a4, 100(a5) |
| ; RV64IXQCCMP-NEXT: lw a2, 104(a5) |
| ; RV64IXQCCMP-NEXT: lw a1, 108(a5) |
| ; RV64IXQCCMP-NEXT: sw a0, 124(a5) |
| ; RV64IXQCCMP-NEXT: sw a3, 120(a5) |
| ; RV64IXQCCMP-NEXT: sw s0, 116(a5) |
| ; RV64IXQCCMP-NEXT: sw a7, 112(a5) |
| ; RV64IXQCCMP-NEXT: sw a1, 108(a5) |
| ; RV64IXQCCMP-NEXT: sw a2, 104(a5) |
| ; RV64IXQCCMP-NEXT: sw a4, 100(a5) |
| ; RV64IXQCCMP-NEXT: sw a6, 96(a5) |
| ; RV64IXQCCMP-NEXT: sw t1, 92(a5) |
| ; RV64IXQCCMP-NEXT: sw t2, 88(a5) |
| ; RV64IXQCCMP-NEXT: sw t3, 84(a5) |
| ; RV64IXQCCMP-NEXT: sw s1, 80(a5) |
| ; RV64IXQCCMP-NEXT: sw ra, 76(a5) |
| ; RV64IXQCCMP-NEXT: sw s11, 72(a5) |
| ; RV64IXQCCMP-NEXT: sw s10, 68(a5) |
| ; RV64IXQCCMP-NEXT: sw s9, 64(a5) |
| ; RV64IXQCCMP-NEXT: sw s8, 60(a5) |
| ; RV64IXQCCMP-NEXT: sw s7, 56(a5) |
| ; RV64IXQCCMP-NEXT: sw s6, 52(a5) |
| ; RV64IXQCCMP-NEXT: sw s5, 48(a5) |
| ; RV64IXQCCMP-NEXT: sw s4, 44(a5) |
| ; RV64IXQCCMP-NEXT: sw s3, 40(a5) |
| ; RV64IXQCCMP-NEXT: sw s2, 36(a5) |
| ; RV64IXQCCMP-NEXT: sw t6, 32(a5) |
| ; RV64IXQCCMP-NEXT: sw t5, 28(a5) |
| ; RV64IXQCCMP-NEXT: sw t4, 24(a5) |
| ; RV64IXQCCMP-NEXT: ld a0, 8(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-NEXT: sw a0, 20(a5) |
| ; RV64IXQCCMP-NEXT: ld a0, 16(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-NEXT: sw a0, 16(a5) |
| ; RV64IXQCCMP-NEXT: ld a0, 24(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-NEXT: sw a0, %lo(var_test_irq+12)(t0) |
| ; RV64IXQCCMP-NEXT: ld a0, 32(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-NEXT: sw a0, %lo(var_test_irq+8)(t0) |
| ; RV64IXQCCMP-NEXT: ld a0, 40(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-NEXT: sw a0, %lo(var_test_irq+4)(t0) |
| ; RV64IXQCCMP-NEXT: ld a0, 48(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-NEXT: sw a0, %lo(var_test_irq)(t0) |
| ; RV64IXQCCMP-NEXT: ld t0, 168(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-NEXT: ld t1, 160(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-NEXT: ld t2, 152(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-NEXT: ld a0, 144(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-NEXT: ld a1, 136(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-NEXT: ld a2, 128(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-NEXT: ld a3, 120(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-NEXT: ld a4, 112(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-NEXT: ld a5, 104(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-NEXT: ld a6, 96(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-NEXT: ld a7, 88(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-NEXT: ld t3, 80(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-NEXT: ld t4, 72(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-NEXT: ld t5, 64(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-NEXT: ld t6, 56(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-NEXT: .cfi_restore t0 |
| ; RV64IXQCCMP-NEXT: .cfi_restore t1 |
| ; RV64IXQCCMP-NEXT: .cfi_restore t2 |
| ; RV64IXQCCMP-NEXT: .cfi_restore a0 |
| ; RV64IXQCCMP-NEXT: .cfi_restore a1 |
| ; RV64IXQCCMP-NEXT: .cfi_restore a2 |
| ; RV64IXQCCMP-NEXT: .cfi_restore a3 |
| ; RV64IXQCCMP-NEXT: .cfi_restore a4 |
| ; RV64IXQCCMP-NEXT: .cfi_restore a5 |
| ; RV64IXQCCMP-NEXT: .cfi_restore a6 |
| ; RV64IXQCCMP-NEXT: .cfi_restore a7 |
| ; RV64IXQCCMP-NEXT: .cfi_restore t3 |
| ; RV64IXQCCMP-NEXT: .cfi_restore t4 |
| ; RV64IXQCCMP-NEXT: .cfi_restore t5 |
| ; RV64IXQCCMP-NEXT: .cfi_restore t6 |
| ; RV64IXQCCMP-NEXT: addi sp, sp, 128 |
| ; RV64IXQCCMP-NEXT: .cfi_def_cfa_offset 160 |
| ; RV64IXQCCMP-NEXT: qc.cm.pop {ra, s0-s11}, 160 |
| ; RV64IXQCCMP-NEXT: .cfi_restore ra |
| ; RV64IXQCCMP-NEXT: .cfi_restore s0 |
| ; RV64IXQCCMP-NEXT: .cfi_restore s1 |
| ; RV64IXQCCMP-NEXT: .cfi_restore s2 |
| ; RV64IXQCCMP-NEXT: .cfi_restore s3 |
| ; RV64IXQCCMP-NEXT: .cfi_restore s4 |
| ; RV64IXQCCMP-NEXT: .cfi_restore s5 |
| ; RV64IXQCCMP-NEXT: .cfi_restore s6 |
| ; RV64IXQCCMP-NEXT: .cfi_restore s7 |
| ; RV64IXQCCMP-NEXT: .cfi_restore s8 |
| ; RV64IXQCCMP-NEXT: .cfi_restore s9 |
| ; RV64IXQCCMP-NEXT: .cfi_restore s10 |
| ; RV64IXQCCMP-NEXT: .cfi_restore s11 |
| ; RV64IXQCCMP-NEXT: .cfi_def_cfa_offset 0 |
| ; RV64IXQCCMP-NEXT: mret |
| ; |
| ; RV32IXQCCMP-FP-LABEL: callee_with_irq: |
| ; RV32IXQCCMP-FP: # %bb.0: |
| ; RV32IXQCCMP-FP-NEXT: qc.cm.pushfp {ra, s0-s11}, -112 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 112 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset ra, -4 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset s0, -8 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset s1, -12 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset s2, -16 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset s3, -20 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset s4, -24 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset s5, -28 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset s6, -32 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset s7, -36 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset s8, -40 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset s9, -44 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset s10, -48 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset s11, -52 |
| ; RV32IXQCCMP-FP-NEXT: addi sp, sp, -48 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 160 |
| ; RV32IXQCCMP-FP-NEXT: sw t0, 92(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-FP-NEXT: sw t1, 88(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-FP-NEXT: sw t2, 84(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-FP-NEXT: sw a0, 80(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-FP-NEXT: sw a1, 76(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-FP-NEXT: sw a2, 72(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-FP-NEXT: sw a3, 68(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-FP-NEXT: sw a4, 64(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-FP-NEXT: sw a5, 60(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-FP-NEXT: sw a6, 56(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-FP-NEXT: sw a7, 52(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-FP-NEXT: sw t3, 48(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-FP-NEXT: sw t4, 44(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-FP-NEXT: sw t5, 40(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-FP-NEXT: sw t6, 36(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset t0, -68 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset t1, -72 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset t2, -76 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset a0, -80 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset a1, -84 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset a2, -88 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset a3, -92 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset a4, -96 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset a5, -100 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset a6, -104 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset a7, -108 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset t3, -112 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset t4, -116 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset t5, -120 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset t6, -124 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa s0, 0 |
| ; RV32IXQCCMP-FP-NEXT: lui t1, %hi(var_test_irq) |
| ; RV32IXQCCMP-FP-NEXT: lw a0, %lo(var_test_irq)(t1) |
| ; RV32IXQCCMP-FP-NEXT: sw a0, -128(s0) # 4-byte Folded Spill |
| ; RV32IXQCCMP-FP-NEXT: lw a0, %lo(var_test_irq+4)(t1) |
| ; RV32IXQCCMP-FP-NEXT: sw a0, -132(s0) # 4-byte Folded Spill |
| ; RV32IXQCCMP-FP-NEXT: lw a0, %lo(var_test_irq+8)(t1) |
| ; RV32IXQCCMP-FP-NEXT: sw a0, -136(s0) # 4-byte Folded Spill |
| ; RV32IXQCCMP-FP-NEXT: lw a0, %lo(var_test_irq+12)(t1) |
| ; RV32IXQCCMP-FP-NEXT: sw a0, -140(s0) # 4-byte Folded Spill |
| ; RV32IXQCCMP-FP-NEXT: addi a5, t1, %lo(var_test_irq) |
| ; RV32IXQCCMP-FP-NEXT: lw a0, 16(a5) |
| ; RV32IXQCCMP-FP-NEXT: sw a0, -144(s0) # 4-byte Folded Spill |
| ; RV32IXQCCMP-FP-NEXT: lw a0, 20(a5) |
| ; RV32IXQCCMP-FP-NEXT: sw a0, -148(s0) # 4-byte Folded Spill |
| ; RV32IXQCCMP-FP-NEXT: lw a0, 24(a5) |
| ; RV32IXQCCMP-FP-NEXT: sw a0, -152(s0) # 4-byte Folded Spill |
| ; RV32IXQCCMP-FP-NEXT: lw t5, 28(a5) |
| ; RV32IXQCCMP-FP-NEXT: lw t6, 32(a5) |
| ; RV32IXQCCMP-FP-NEXT: lw s2, 36(a5) |
| ; RV32IXQCCMP-FP-NEXT: lw s3, 40(a5) |
| ; RV32IXQCCMP-FP-NEXT: lw s4, 44(a5) |
| ; RV32IXQCCMP-FP-NEXT: lw s5, 48(a5) |
| ; RV32IXQCCMP-FP-NEXT: lw s6, 52(a5) |
| ; RV32IXQCCMP-FP-NEXT: lw s7, 56(a5) |
| ; RV32IXQCCMP-FP-NEXT: lw s8, 60(a5) |
| ; RV32IXQCCMP-FP-NEXT: lw s9, 64(a5) |
| ; RV32IXQCCMP-FP-NEXT: lw s10, 68(a5) |
| ; RV32IXQCCMP-FP-NEXT: lw s11, 72(a5) |
| ; RV32IXQCCMP-FP-NEXT: lw ra, 76(a5) |
| ; RV32IXQCCMP-FP-NEXT: lw t4, 80(a5) |
| ; RV32IXQCCMP-FP-NEXT: lw t3, 84(a5) |
| ; RV32IXQCCMP-FP-NEXT: lw t2, 88(a5) |
| ; RV32IXQCCMP-FP-NEXT: lw s1, 92(a5) |
| ; RV32IXQCCMP-FP-NEXT: lw t0, 112(a5) |
| ; RV32IXQCCMP-FP-NEXT: lw a4, 116(a5) |
| ; RV32IXQCCMP-FP-NEXT: lw a3, 120(a5) |
| ; RV32IXQCCMP-FP-NEXT: lw a0, 124(a5) |
| ; RV32IXQCCMP-FP-NEXT: lw a7, 96(a5) |
| ; RV32IXQCCMP-FP-NEXT: lw a6, 100(a5) |
| ; RV32IXQCCMP-FP-NEXT: lw a2, 104(a5) |
| ; RV32IXQCCMP-FP-NEXT: lw a1, 108(a5) |
| ; RV32IXQCCMP-FP-NEXT: sw a0, 124(a5) |
| ; RV32IXQCCMP-FP-NEXT: sw a3, 120(a5) |
| ; RV32IXQCCMP-FP-NEXT: sw a4, 116(a5) |
| ; RV32IXQCCMP-FP-NEXT: sw t0, 112(a5) |
| ; RV32IXQCCMP-FP-NEXT: sw a1, 108(a5) |
| ; RV32IXQCCMP-FP-NEXT: sw a2, 104(a5) |
| ; RV32IXQCCMP-FP-NEXT: sw a6, 100(a5) |
| ; RV32IXQCCMP-FP-NEXT: sw a7, 96(a5) |
| ; RV32IXQCCMP-FP-NEXT: sw s1, 92(a5) |
| ; RV32IXQCCMP-FP-NEXT: sw t2, 88(a5) |
| ; RV32IXQCCMP-FP-NEXT: sw t3, 84(a5) |
| ; RV32IXQCCMP-FP-NEXT: sw t4, 80(a5) |
| ; RV32IXQCCMP-FP-NEXT: sw ra, 76(a5) |
| ; RV32IXQCCMP-FP-NEXT: sw s11, 72(a5) |
| ; RV32IXQCCMP-FP-NEXT: sw s10, 68(a5) |
| ; RV32IXQCCMP-FP-NEXT: sw s9, 64(a5) |
| ; RV32IXQCCMP-FP-NEXT: sw s8, 60(a5) |
| ; RV32IXQCCMP-FP-NEXT: sw s7, 56(a5) |
| ; RV32IXQCCMP-FP-NEXT: sw s6, 52(a5) |
| ; RV32IXQCCMP-FP-NEXT: sw s5, 48(a5) |
| ; RV32IXQCCMP-FP-NEXT: sw s4, 44(a5) |
| ; RV32IXQCCMP-FP-NEXT: sw s3, 40(a5) |
| ; RV32IXQCCMP-FP-NEXT: sw s2, 36(a5) |
| ; RV32IXQCCMP-FP-NEXT: sw t6, 32(a5) |
| ; RV32IXQCCMP-FP-NEXT: sw t5, 28(a5) |
| ; RV32IXQCCMP-FP-NEXT: lw a0, -152(s0) # 4-byte Folded Reload |
| ; RV32IXQCCMP-FP-NEXT: sw a0, 24(a5) |
| ; RV32IXQCCMP-FP-NEXT: lw a0, -148(s0) # 4-byte Folded Reload |
| ; RV32IXQCCMP-FP-NEXT: sw a0, 20(a5) |
| ; RV32IXQCCMP-FP-NEXT: lw a0, -144(s0) # 4-byte Folded Reload |
| ; RV32IXQCCMP-FP-NEXT: sw a0, 16(a5) |
| ; RV32IXQCCMP-FP-NEXT: lw a0, -140(s0) # 4-byte Folded Reload |
| ; RV32IXQCCMP-FP-NEXT: sw a0, %lo(var_test_irq+12)(t1) |
| ; RV32IXQCCMP-FP-NEXT: lw a0, -136(s0) # 4-byte Folded Reload |
| ; RV32IXQCCMP-FP-NEXT: sw a0, %lo(var_test_irq+8)(t1) |
| ; RV32IXQCCMP-FP-NEXT: lw a0, -132(s0) # 4-byte Folded Reload |
| ; RV32IXQCCMP-FP-NEXT: sw a0, %lo(var_test_irq+4)(t1) |
| ; RV32IXQCCMP-FP-NEXT: lw a0, -128(s0) # 4-byte Folded Reload |
| ; RV32IXQCCMP-FP-NEXT: sw a0, %lo(var_test_irq)(t1) |
| ; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa sp, 160 |
| ; RV32IXQCCMP-FP-NEXT: lw t0, 92(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-FP-NEXT: lw t1, 88(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-FP-NEXT: lw t2, 84(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-FP-NEXT: lw a0, 80(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-FP-NEXT: lw a1, 76(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-FP-NEXT: lw a2, 72(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-FP-NEXT: lw a3, 68(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-FP-NEXT: lw a4, 64(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-FP-NEXT: lw a5, 60(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-FP-NEXT: lw a6, 56(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-FP-NEXT: lw a7, 52(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-FP-NEXT: lw t3, 48(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-FP-NEXT: lw t4, 44(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-FP-NEXT: lw t5, 40(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-FP-NEXT: lw t6, 36(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-FP-NEXT: .cfi_restore t0 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_restore t1 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_restore t2 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_restore a0 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_restore a1 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_restore a2 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_restore a3 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_restore a4 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_restore a5 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_restore a6 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_restore a7 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_restore t3 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_restore t4 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_restore t5 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_restore t6 |
| ; RV32IXQCCMP-FP-NEXT: addi sp, sp, 48 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 112 |
| ; RV32IXQCCMP-FP-NEXT: qc.cm.pop {ra, s0-s11}, 112 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_restore ra |
| ; RV32IXQCCMP-FP-NEXT: .cfi_restore s0 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_restore s1 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_restore s2 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_restore s3 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_restore s4 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_restore s5 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_restore s6 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_restore s7 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_restore s8 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_restore s9 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_restore s10 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_restore s11 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 0 |
| ; RV32IXQCCMP-FP-NEXT: mret |
| ; |
| ; RV64IXQCCMP-FP-LABEL: callee_with_irq: |
| ; RV64IXQCCMP-FP: # %bb.0: |
| ; RV64IXQCCMP-FP-NEXT: qc.cm.pushfp {ra, s0-s11}, -160 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 160 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset ra, -8 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset s0, -16 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset s1, -24 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset s2, -32 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset s3, -40 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset s4, -48 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset s5, -56 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset s6, -64 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset s7, -72 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset s8, -80 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset s9, -88 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset s10, -96 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset s11, -104 |
| ; RV64IXQCCMP-FP-NEXT: addi sp, sp, -128 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 288 |
| ; RV64IXQCCMP-FP-NEXT: sd t0, 168(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-FP-NEXT: sd t1, 160(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-FP-NEXT: sd t2, 152(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-FP-NEXT: sd a0, 144(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-FP-NEXT: sd a1, 136(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-FP-NEXT: sd a2, 128(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-FP-NEXT: sd a3, 120(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-FP-NEXT: sd a4, 112(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-FP-NEXT: sd a5, 104(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-FP-NEXT: sd a6, 96(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-FP-NEXT: sd a7, 88(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-FP-NEXT: sd t3, 80(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-FP-NEXT: sd t4, 72(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-FP-NEXT: sd t5, 64(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-FP-NEXT: sd t6, 56(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset t0, -120 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset t1, -128 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset t2, -136 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset a0, -144 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset a1, -152 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset a2, -160 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset a3, -168 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset a4, -176 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset a5, -184 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset a6, -192 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset a7, -200 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset t3, -208 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset t4, -216 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset t5, -224 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset t6, -232 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa s0, 0 |
| ; RV64IXQCCMP-FP-NEXT: lui t1, %hi(var_test_irq) |
| ; RV64IXQCCMP-FP-NEXT: lw a0, %lo(var_test_irq)(t1) |
| ; RV64IXQCCMP-FP-NEXT: sd a0, -240(s0) # 8-byte Folded Spill |
| ; RV64IXQCCMP-FP-NEXT: lw a0, %lo(var_test_irq+4)(t1) |
| ; RV64IXQCCMP-FP-NEXT: sd a0, -248(s0) # 8-byte Folded Spill |
| ; RV64IXQCCMP-FP-NEXT: lw a0, %lo(var_test_irq+8)(t1) |
| ; RV64IXQCCMP-FP-NEXT: sd a0, -256(s0) # 8-byte Folded Spill |
| ; RV64IXQCCMP-FP-NEXT: lw a0, %lo(var_test_irq+12)(t1) |
| ; RV64IXQCCMP-FP-NEXT: sd a0, -264(s0) # 8-byte Folded Spill |
| ; RV64IXQCCMP-FP-NEXT: addi a5, t1, %lo(var_test_irq) |
| ; RV64IXQCCMP-FP-NEXT: lw a0, 16(a5) |
| ; RV64IXQCCMP-FP-NEXT: sd a0, -272(s0) # 8-byte Folded Spill |
| ; RV64IXQCCMP-FP-NEXT: lw a0, 20(a5) |
| ; RV64IXQCCMP-FP-NEXT: sd a0, -280(s0) # 8-byte Folded Spill |
| ; RV64IXQCCMP-FP-NEXT: lw a0, 24(a5) |
| ; RV64IXQCCMP-FP-NEXT: sd a0, -288(s0) # 8-byte Folded Spill |
| ; RV64IXQCCMP-FP-NEXT: lw t5, 28(a5) |
| ; RV64IXQCCMP-FP-NEXT: lw t6, 32(a5) |
| ; RV64IXQCCMP-FP-NEXT: lw s2, 36(a5) |
| ; RV64IXQCCMP-FP-NEXT: lw s3, 40(a5) |
| ; RV64IXQCCMP-FP-NEXT: lw s4, 44(a5) |
| ; RV64IXQCCMP-FP-NEXT: lw s5, 48(a5) |
| ; RV64IXQCCMP-FP-NEXT: lw s6, 52(a5) |
| ; RV64IXQCCMP-FP-NEXT: lw s7, 56(a5) |
| ; RV64IXQCCMP-FP-NEXT: lw s8, 60(a5) |
| ; RV64IXQCCMP-FP-NEXT: lw s9, 64(a5) |
| ; RV64IXQCCMP-FP-NEXT: lw s10, 68(a5) |
| ; RV64IXQCCMP-FP-NEXT: lw s11, 72(a5) |
| ; RV64IXQCCMP-FP-NEXT: lw ra, 76(a5) |
| ; RV64IXQCCMP-FP-NEXT: lw t4, 80(a5) |
| ; RV64IXQCCMP-FP-NEXT: lw t3, 84(a5) |
| ; RV64IXQCCMP-FP-NEXT: lw t2, 88(a5) |
| ; RV64IXQCCMP-FP-NEXT: lw s1, 92(a5) |
| ; RV64IXQCCMP-FP-NEXT: lw t0, 112(a5) |
| ; RV64IXQCCMP-FP-NEXT: lw a4, 116(a5) |
| ; RV64IXQCCMP-FP-NEXT: lw a3, 120(a5) |
| ; RV64IXQCCMP-FP-NEXT: lw a0, 124(a5) |
| ; RV64IXQCCMP-FP-NEXT: lw a7, 96(a5) |
| ; RV64IXQCCMP-FP-NEXT: lw a6, 100(a5) |
| ; RV64IXQCCMP-FP-NEXT: lw a2, 104(a5) |
| ; RV64IXQCCMP-FP-NEXT: lw a1, 108(a5) |
| ; RV64IXQCCMP-FP-NEXT: sw a0, 124(a5) |
| ; RV64IXQCCMP-FP-NEXT: sw a3, 120(a5) |
| ; RV64IXQCCMP-FP-NEXT: sw a4, 116(a5) |
| ; RV64IXQCCMP-FP-NEXT: sw t0, 112(a5) |
| ; RV64IXQCCMP-FP-NEXT: sw a1, 108(a5) |
| ; RV64IXQCCMP-FP-NEXT: sw a2, 104(a5) |
| ; RV64IXQCCMP-FP-NEXT: sw a6, 100(a5) |
| ; RV64IXQCCMP-FP-NEXT: sw a7, 96(a5) |
| ; RV64IXQCCMP-FP-NEXT: sw s1, 92(a5) |
| ; RV64IXQCCMP-FP-NEXT: sw t2, 88(a5) |
| ; RV64IXQCCMP-FP-NEXT: sw t3, 84(a5) |
| ; RV64IXQCCMP-FP-NEXT: sw t4, 80(a5) |
| ; RV64IXQCCMP-FP-NEXT: sw ra, 76(a5) |
| ; RV64IXQCCMP-FP-NEXT: sw s11, 72(a5) |
| ; RV64IXQCCMP-FP-NEXT: sw s10, 68(a5) |
| ; RV64IXQCCMP-FP-NEXT: sw s9, 64(a5) |
| ; RV64IXQCCMP-FP-NEXT: sw s8, 60(a5) |
| ; RV64IXQCCMP-FP-NEXT: sw s7, 56(a5) |
| ; RV64IXQCCMP-FP-NEXT: sw s6, 52(a5) |
| ; RV64IXQCCMP-FP-NEXT: sw s5, 48(a5) |
| ; RV64IXQCCMP-FP-NEXT: sw s4, 44(a5) |
| ; RV64IXQCCMP-FP-NEXT: sw s3, 40(a5) |
| ; RV64IXQCCMP-FP-NEXT: sw s2, 36(a5) |
| ; RV64IXQCCMP-FP-NEXT: sw t6, 32(a5) |
| ; RV64IXQCCMP-FP-NEXT: sw t5, 28(a5) |
| ; RV64IXQCCMP-FP-NEXT: ld a0, -288(s0) # 8-byte Folded Reload |
| ; RV64IXQCCMP-FP-NEXT: sw a0, 24(a5) |
| ; RV64IXQCCMP-FP-NEXT: ld a0, -280(s0) # 8-byte Folded Reload |
| ; RV64IXQCCMP-FP-NEXT: sw a0, 20(a5) |
| ; RV64IXQCCMP-FP-NEXT: ld a0, -272(s0) # 8-byte Folded Reload |
| ; RV64IXQCCMP-FP-NEXT: sw a0, 16(a5) |
| ; RV64IXQCCMP-FP-NEXT: ld a0, -264(s0) # 8-byte Folded Reload |
| ; RV64IXQCCMP-FP-NEXT: sw a0, %lo(var_test_irq+12)(t1) |
| ; RV64IXQCCMP-FP-NEXT: ld a0, -256(s0) # 8-byte Folded Reload |
| ; RV64IXQCCMP-FP-NEXT: sw a0, %lo(var_test_irq+8)(t1) |
| ; RV64IXQCCMP-FP-NEXT: ld a0, -248(s0) # 8-byte Folded Reload |
| ; RV64IXQCCMP-FP-NEXT: sw a0, %lo(var_test_irq+4)(t1) |
| ; RV64IXQCCMP-FP-NEXT: ld a0, -240(s0) # 8-byte Folded Reload |
| ; RV64IXQCCMP-FP-NEXT: sw a0, %lo(var_test_irq)(t1) |
| ; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa sp, 288 |
| ; RV64IXQCCMP-FP-NEXT: ld t0, 168(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-FP-NEXT: ld t1, 160(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-FP-NEXT: ld t2, 152(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-FP-NEXT: ld a0, 144(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-FP-NEXT: ld a1, 136(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-FP-NEXT: ld a2, 128(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-FP-NEXT: ld a3, 120(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-FP-NEXT: ld a4, 112(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-FP-NEXT: ld a5, 104(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-FP-NEXT: ld a6, 96(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-FP-NEXT: ld a7, 88(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-FP-NEXT: ld t3, 80(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-FP-NEXT: ld t4, 72(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-FP-NEXT: ld t5, 64(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-FP-NEXT: ld t6, 56(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-FP-NEXT: .cfi_restore t0 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_restore t1 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_restore t2 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_restore a0 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_restore a1 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_restore a2 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_restore a3 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_restore a4 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_restore a5 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_restore a6 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_restore a7 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_restore t3 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_restore t4 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_restore t5 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_restore t6 |
| ; RV64IXQCCMP-FP-NEXT: addi sp, sp, 128 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 160 |
| ; RV64IXQCCMP-FP-NEXT: qc.cm.pop {ra, s0-s11}, 160 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_restore ra |
| ; RV64IXQCCMP-FP-NEXT: .cfi_restore s0 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_restore s1 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_restore s2 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_restore s3 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_restore s4 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_restore s5 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_restore s6 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_restore s7 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_restore s8 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_restore s9 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_restore s10 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_restore s11 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 0 |
| ; RV64IXQCCMP-FP-NEXT: mret |
| ; |
| ; RV32IXQCCMP-SR-LABEL: callee_with_irq: |
| ; RV32IXQCCMP-SR: # %bb.0: |
| ; RV32IXQCCMP-SR-NEXT: qc.cm.push {ra, s0-s11}, -112 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 112 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset ra, -4 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset s0, -8 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset s1, -12 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset s2, -16 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset s3, -20 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset s4, -24 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset s5, -28 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset s6, -32 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset s7, -36 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset s8, -40 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset s9, -44 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset s10, -48 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset s11, -52 |
| ; RV32IXQCCMP-SR-NEXT: addi sp, sp, -48 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 160 |
| ; RV32IXQCCMP-SR-NEXT: sw t0, 92(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-SR-NEXT: sw t1, 88(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-SR-NEXT: sw t2, 84(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-SR-NEXT: sw a0, 80(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-SR-NEXT: sw a1, 76(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-SR-NEXT: sw a2, 72(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-SR-NEXT: sw a3, 68(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-SR-NEXT: sw a4, 64(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-SR-NEXT: sw a5, 60(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-SR-NEXT: sw a6, 56(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-SR-NEXT: sw a7, 52(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-SR-NEXT: sw t3, 48(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-SR-NEXT: sw t4, 44(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-SR-NEXT: sw t5, 40(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-SR-NEXT: sw t6, 36(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset t0, -68 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset t1, -72 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset t2, -76 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset a0, -80 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset a1, -84 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset a2, -88 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset a3, -92 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset a4, -96 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset a5, -100 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset a6, -104 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset a7, -108 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset t3, -112 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset t4, -116 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset t5, -120 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset t6, -124 |
| ; RV32IXQCCMP-SR-NEXT: lui t0, %hi(var_test_irq) |
| ; RV32IXQCCMP-SR-NEXT: lw a0, %lo(var_test_irq)(t0) |
| ; RV32IXQCCMP-SR-NEXT: sw a0, 32(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-SR-NEXT: lw a0, %lo(var_test_irq+4)(t0) |
| ; RV32IXQCCMP-SR-NEXT: sw a0, 28(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-SR-NEXT: lw a0, %lo(var_test_irq+8)(t0) |
| ; RV32IXQCCMP-SR-NEXT: sw a0, 24(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-SR-NEXT: lw a0, %lo(var_test_irq+12)(t0) |
| ; RV32IXQCCMP-SR-NEXT: sw a0, 20(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-SR-NEXT: addi a5, t0, %lo(var_test_irq) |
| ; RV32IXQCCMP-SR-NEXT: lw a0, 16(a5) |
| ; RV32IXQCCMP-SR-NEXT: sw a0, 16(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-SR-NEXT: lw a0, 20(a5) |
| ; RV32IXQCCMP-SR-NEXT: sw a0, 12(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-SR-NEXT: lw t4, 24(a5) |
| ; RV32IXQCCMP-SR-NEXT: lw t5, 28(a5) |
| ; RV32IXQCCMP-SR-NEXT: lw t6, 32(a5) |
| ; RV32IXQCCMP-SR-NEXT: lw s2, 36(a5) |
| ; RV32IXQCCMP-SR-NEXT: lw s3, 40(a5) |
| ; RV32IXQCCMP-SR-NEXT: lw s4, 44(a5) |
| ; RV32IXQCCMP-SR-NEXT: lw s5, 48(a5) |
| ; RV32IXQCCMP-SR-NEXT: lw s6, 52(a5) |
| ; RV32IXQCCMP-SR-NEXT: lw s7, 56(a5) |
| ; RV32IXQCCMP-SR-NEXT: lw s8, 60(a5) |
| ; RV32IXQCCMP-SR-NEXT: lw s9, 64(a5) |
| ; RV32IXQCCMP-SR-NEXT: lw s10, 68(a5) |
| ; RV32IXQCCMP-SR-NEXT: lw s11, 72(a5) |
| ; RV32IXQCCMP-SR-NEXT: lw ra, 76(a5) |
| ; RV32IXQCCMP-SR-NEXT: lw s1, 80(a5) |
| ; RV32IXQCCMP-SR-NEXT: lw t3, 84(a5) |
| ; RV32IXQCCMP-SR-NEXT: lw t2, 88(a5) |
| ; RV32IXQCCMP-SR-NEXT: lw t1, 92(a5) |
| ; RV32IXQCCMP-SR-NEXT: lw a7, 112(a5) |
| ; RV32IXQCCMP-SR-NEXT: lw s0, 116(a5) |
| ; RV32IXQCCMP-SR-NEXT: lw a3, 120(a5) |
| ; RV32IXQCCMP-SR-NEXT: lw a0, 124(a5) |
| ; RV32IXQCCMP-SR-NEXT: lw a6, 96(a5) |
| ; RV32IXQCCMP-SR-NEXT: lw a4, 100(a5) |
| ; RV32IXQCCMP-SR-NEXT: lw a2, 104(a5) |
| ; RV32IXQCCMP-SR-NEXT: lw a1, 108(a5) |
| ; RV32IXQCCMP-SR-NEXT: sw a0, 124(a5) |
| ; RV32IXQCCMP-SR-NEXT: sw a3, 120(a5) |
| ; RV32IXQCCMP-SR-NEXT: sw s0, 116(a5) |
| ; RV32IXQCCMP-SR-NEXT: sw a7, 112(a5) |
| ; RV32IXQCCMP-SR-NEXT: sw a1, 108(a5) |
| ; RV32IXQCCMP-SR-NEXT: sw a2, 104(a5) |
| ; RV32IXQCCMP-SR-NEXT: sw a4, 100(a5) |
| ; RV32IXQCCMP-SR-NEXT: sw a6, 96(a5) |
| ; RV32IXQCCMP-SR-NEXT: sw t1, 92(a5) |
| ; RV32IXQCCMP-SR-NEXT: sw t2, 88(a5) |
| ; RV32IXQCCMP-SR-NEXT: sw t3, 84(a5) |
| ; RV32IXQCCMP-SR-NEXT: sw s1, 80(a5) |
| ; RV32IXQCCMP-SR-NEXT: sw ra, 76(a5) |
| ; RV32IXQCCMP-SR-NEXT: sw s11, 72(a5) |
| ; RV32IXQCCMP-SR-NEXT: sw s10, 68(a5) |
| ; RV32IXQCCMP-SR-NEXT: sw s9, 64(a5) |
| ; RV32IXQCCMP-SR-NEXT: sw s8, 60(a5) |
| ; RV32IXQCCMP-SR-NEXT: sw s7, 56(a5) |
| ; RV32IXQCCMP-SR-NEXT: sw s6, 52(a5) |
| ; RV32IXQCCMP-SR-NEXT: sw s5, 48(a5) |
| ; RV32IXQCCMP-SR-NEXT: sw s4, 44(a5) |
| ; RV32IXQCCMP-SR-NEXT: sw s3, 40(a5) |
| ; RV32IXQCCMP-SR-NEXT: sw s2, 36(a5) |
| ; RV32IXQCCMP-SR-NEXT: sw t6, 32(a5) |
| ; RV32IXQCCMP-SR-NEXT: sw t5, 28(a5) |
| ; RV32IXQCCMP-SR-NEXT: sw t4, 24(a5) |
| ; RV32IXQCCMP-SR-NEXT: lw a0, 12(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-SR-NEXT: sw a0, 20(a5) |
| ; RV32IXQCCMP-SR-NEXT: lw a0, 16(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-SR-NEXT: sw a0, 16(a5) |
| ; RV32IXQCCMP-SR-NEXT: lw a0, 20(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-SR-NEXT: sw a0, %lo(var_test_irq+12)(t0) |
| ; RV32IXQCCMP-SR-NEXT: lw a0, 24(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-SR-NEXT: sw a0, %lo(var_test_irq+8)(t0) |
| ; RV32IXQCCMP-SR-NEXT: lw a0, 28(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-SR-NEXT: sw a0, %lo(var_test_irq+4)(t0) |
| ; RV32IXQCCMP-SR-NEXT: lw a0, 32(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-SR-NEXT: sw a0, %lo(var_test_irq)(t0) |
| ; RV32IXQCCMP-SR-NEXT: lw t0, 92(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-SR-NEXT: lw t1, 88(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-SR-NEXT: lw t2, 84(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-SR-NEXT: lw a0, 80(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-SR-NEXT: lw a1, 76(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-SR-NEXT: lw a2, 72(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-SR-NEXT: lw a3, 68(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-SR-NEXT: lw a4, 64(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-SR-NEXT: lw a5, 60(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-SR-NEXT: lw a6, 56(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-SR-NEXT: lw a7, 52(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-SR-NEXT: lw t3, 48(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-SR-NEXT: lw t4, 44(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-SR-NEXT: lw t5, 40(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-SR-NEXT: lw t6, 36(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-SR-NEXT: .cfi_restore t0 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_restore t1 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_restore t2 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_restore a0 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_restore a1 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_restore a2 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_restore a3 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_restore a4 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_restore a5 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_restore a6 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_restore a7 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_restore t3 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_restore t4 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_restore t5 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_restore t6 |
| ; RV32IXQCCMP-SR-NEXT: addi sp, sp, 48 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 112 |
| ; RV32IXQCCMP-SR-NEXT: qc.cm.pop {ra, s0-s11}, 112 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_restore ra |
| ; RV32IXQCCMP-SR-NEXT: .cfi_restore s0 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_restore s1 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_restore s2 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_restore s3 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_restore s4 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_restore s5 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_restore s6 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_restore s7 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_restore s8 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_restore s9 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_restore s10 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_restore s11 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 0 |
| ; RV32IXQCCMP-SR-NEXT: mret |
| ; |
| ; RV64IXQCCMP-SR-LABEL: callee_with_irq: |
| ; RV64IXQCCMP-SR: # %bb.0: |
| ; RV64IXQCCMP-SR-NEXT: qc.cm.push {ra, s0-s11}, -160 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 160 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset ra, -8 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset s0, -16 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset s1, -24 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset s2, -32 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset s3, -40 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset s4, -48 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset s5, -56 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset s6, -64 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset s7, -72 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset s8, -80 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset s9, -88 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset s10, -96 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset s11, -104 |
| ; RV64IXQCCMP-SR-NEXT: addi sp, sp, -128 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 288 |
| ; RV64IXQCCMP-SR-NEXT: sd t0, 168(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-SR-NEXT: sd t1, 160(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-SR-NEXT: sd t2, 152(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-SR-NEXT: sd a0, 144(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-SR-NEXT: sd a1, 136(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-SR-NEXT: sd a2, 128(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-SR-NEXT: sd a3, 120(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-SR-NEXT: sd a4, 112(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-SR-NEXT: sd a5, 104(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-SR-NEXT: sd a6, 96(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-SR-NEXT: sd a7, 88(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-SR-NEXT: sd t3, 80(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-SR-NEXT: sd t4, 72(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-SR-NEXT: sd t5, 64(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-SR-NEXT: sd t6, 56(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset t0, -120 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset t1, -128 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset t2, -136 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset a0, -144 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset a1, -152 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset a2, -160 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset a3, -168 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset a4, -176 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset a5, -184 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset a6, -192 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset a7, -200 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset t3, -208 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset t4, -216 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset t5, -224 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset t6, -232 |
| ; RV64IXQCCMP-SR-NEXT: lui t0, %hi(var_test_irq) |
| ; RV64IXQCCMP-SR-NEXT: lw a0, %lo(var_test_irq)(t0) |
| ; RV64IXQCCMP-SR-NEXT: sd a0, 48(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-SR-NEXT: lw a0, %lo(var_test_irq+4)(t0) |
| ; RV64IXQCCMP-SR-NEXT: sd a0, 40(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-SR-NEXT: lw a0, %lo(var_test_irq+8)(t0) |
| ; RV64IXQCCMP-SR-NEXT: sd a0, 32(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-SR-NEXT: lw a0, %lo(var_test_irq+12)(t0) |
| ; RV64IXQCCMP-SR-NEXT: sd a0, 24(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-SR-NEXT: addi a5, t0, %lo(var_test_irq) |
| ; RV64IXQCCMP-SR-NEXT: lw a0, 16(a5) |
| ; RV64IXQCCMP-SR-NEXT: sd a0, 16(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-SR-NEXT: lw a0, 20(a5) |
| ; RV64IXQCCMP-SR-NEXT: sd a0, 8(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-SR-NEXT: lw t4, 24(a5) |
| ; RV64IXQCCMP-SR-NEXT: lw t5, 28(a5) |
| ; RV64IXQCCMP-SR-NEXT: lw t6, 32(a5) |
| ; RV64IXQCCMP-SR-NEXT: lw s2, 36(a5) |
| ; RV64IXQCCMP-SR-NEXT: lw s3, 40(a5) |
| ; RV64IXQCCMP-SR-NEXT: lw s4, 44(a5) |
| ; RV64IXQCCMP-SR-NEXT: lw s5, 48(a5) |
| ; RV64IXQCCMP-SR-NEXT: lw s6, 52(a5) |
| ; RV64IXQCCMP-SR-NEXT: lw s7, 56(a5) |
| ; RV64IXQCCMP-SR-NEXT: lw s8, 60(a5) |
| ; RV64IXQCCMP-SR-NEXT: lw s9, 64(a5) |
| ; RV64IXQCCMP-SR-NEXT: lw s10, 68(a5) |
| ; RV64IXQCCMP-SR-NEXT: lw s11, 72(a5) |
| ; RV64IXQCCMP-SR-NEXT: lw ra, 76(a5) |
| ; RV64IXQCCMP-SR-NEXT: lw s1, 80(a5) |
| ; RV64IXQCCMP-SR-NEXT: lw t3, 84(a5) |
| ; RV64IXQCCMP-SR-NEXT: lw t2, 88(a5) |
| ; RV64IXQCCMP-SR-NEXT: lw t1, 92(a5) |
| ; RV64IXQCCMP-SR-NEXT: lw a7, 112(a5) |
| ; RV64IXQCCMP-SR-NEXT: lw s0, 116(a5) |
| ; RV64IXQCCMP-SR-NEXT: lw a3, 120(a5) |
| ; RV64IXQCCMP-SR-NEXT: lw a0, 124(a5) |
| ; RV64IXQCCMP-SR-NEXT: lw a6, 96(a5) |
| ; RV64IXQCCMP-SR-NEXT: lw a4, 100(a5) |
| ; RV64IXQCCMP-SR-NEXT: lw a2, 104(a5) |
| ; RV64IXQCCMP-SR-NEXT: lw a1, 108(a5) |
| ; RV64IXQCCMP-SR-NEXT: sw a0, 124(a5) |
| ; RV64IXQCCMP-SR-NEXT: sw a3, 120(a5) |
| ; RV64IXQCCMP-SR-NEXT: sw s0, 116(a5) |
| ; RV64IXQCCMP-SR-NEXT: sw a7, 112(a5) |
| ; RV64IXQCCMP-SR-NEXT: sw a1, 108(a5) |
| ; RV64IXQCCMP-SR-NEXT: sw a2, 104(a5) |
| ; RV64IXQCCMP-SR-NEXT: sw a4, 100(a5) |
| ; RV64IXQCCMP-SR-NEXT: sw a6, 96(a5) |
| ; RV64IXQCCMP-SR-NEXT: sw t1, 92(a5) |
| ; RV64IXQCCMP-SR-NEXT: sw t2, 88(a5) |
| ; RV64IXQCCMP-SR-NEXT: sw t3, 84(a5) |
| ; RV64IXQCCMP-SR-NEXT: sw s1, 80(a5) |
| ; RV64IXQCCMP-SR-NEXT: sw ra, 76(a5) |
| ; RV64IXQCCMP-SR-NEXT: sw s11, 72(a5) |
| ; RV64IXQCCMP-SR-NEXT: sw s10, 68(a5) |
| ; RV64IXQCCMP-SR-NEXT: sw s9, 64(a5) |
| ; RV64IXQCCMP-SR-NEXT: sw s8, 60(a5) |
| ; RV64IXQCCMP-SR-NEXT: sw s7, 56(a5) |
| ; RV64IXQCCMP-SR-NEXT: sw s6, 52(a5) |
| ; RV64IXQCCMP-SR-NEXT: sw s5, 48(a5) |
| ; RV64IXQCCMP-SR-NEXT: sw s4, 44(a5) |
| ; RV64IXQCCMP-SR-NEXT: sw s3, 40(a5) |
| ; RV64IXQCCMP-SR-NEXT: sw s2, 36(a5) |
| ; RV64IXQCCMP-SR-NEXT: sw t6, 32(a5) |
| ; RV64IXQCCMP-SR-NEXT: sw t5, 28(a5) |
| ; RV64IXQCCMP-SR-NEXT: sw t4, 24(a5) |
| ; RV64IXQCCMP-SR-NEXT: ld a0, 8(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-SR-NEXT: sw a0, 20(a5) |
| ; RV64IXQCCMP-SR-NEXT: ld a0, 16(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-SR-NEXT: sw a0, 16(a5) |
| ; RV64IXQCCMP-SR-NEXT: ld a0, 24(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-SR-NEXT: sw a0, %lo(var_test_irq+12)(t0) |
| ; RV64IXQCCMP-SR-NEXT: ld a0, 32(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-SR-NEXT: sw a0, %lo(var_test_irq+8)(t0) |
| ; RV64IXQCCMP-SR-NEXT: ld a0, 40(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-SR-NEXT: sw a0, %lo(var_test_irq+4)(t0) |
| ; RV64IXQCCMP-SR-NEXT: ld a0, 48(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-SR-NEXT: sw a0, %lo(var_test_irq)(t0) |
| ; RV64IXQCCMP-SR-NEXT: ld t0, 168(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-SR-NEXT: ld t1, 160(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-SR-NEXT: ld t2, 152(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-SR-NEXT: ld a0, 144(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-SR-NEXT: ld a1, 136(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-SR-NEXT: ld a2, 128(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-SR-NEXT: ld a3, 120(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-SR-NEXT: ld a4, 112(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-SR-NEXT: ld a5, 104(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-SR-NEXT: ld a6, 96(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-SR-NEXT: ld a7, 88(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-SR-NEXT: ld t3, 80(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-SR-NEXT: ld t4, 72(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-SR-NEXT: ld t5, 64(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-SR-NEXT: ld t6, 56(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-SR-NEXT: .cfi_restore t0 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_restore t1 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_restore t2 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_restore a0 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_restore a1 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_restore a2 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_restore a3 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_restore a4 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_restore a5 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_restore a6 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_restore a7 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_restore t3 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_restore t4 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_restore t5 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_restore t6 |
| ; RV64IXQCCMP-SR-NEXT: addi sp, sp, 128 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 160 |
| ; RV64IXQCCMP-SR-NEXT: qc.cm.pop {ra, s0-s11}, 160 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_restore ra |
| ; RV64IXQCCMP-SR-NEXT: .cfi_restore s0 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_restore s1 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_restore s2 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_restore s3 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_restore s4 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_restore s5 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_restore s6 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_restore s7 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_restore s8 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_restore s9 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_restore s10 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_restore s11 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 0 |
| ; RV64IXQCCMP-SR-NEXT: mret |
| %val = load [32 x i32], ptr @var_test_irq |
| store volatile [32 x i32] %val, ptr @var_test_irq |
| ret void |
| } |
| |
| define void @callee_no_irq() { |
| ; RV32IXQCCMP-LABEL: callee_no_irq: |
| ; RV32IXQCCMP: # %bb.0: |
| ; RV32IXQCCMP-NEXT: qc.cm.push {ra, s0-s11}, -96 |
| ; RV32IXQCCMP-NEXT: .cfi_def_cfa_offset 96 |
| ; RV32IXQCCMP-NEXT: .cfi_offset ra, -4 |
| ; RV32IXQCCMP-NEXT: .cfi_offset s0, -8 |
| ; RV32IXQCCMP-NEXT: .cfi_offset s1, -12 |
| ; RV32IXQCCMP-NEXT: .cfi_offset s2, -16 |
| ; RV32IXQCCMP-NEXT: .cfi_offset s3, -20 |
| ; RV32IXQCCMP-NEXT: .cfi_offset s4, -24 |
| ; RV32IXQCCMP-NEXT: .cfi_offset s5, -28 |
| ; RV32IXQCCMP-NEXT: .cfi_offset s6, -32 |
| ; RV32IXQCCMP-NEXT: .cfi_offset s7, -36 |
| ; RV32IXQCCMP-NEXT: .cfi_offset s8, -40 |
| ; RV32IXQCCMP-NEXT: .cfi_offset s9, -44 |
| ; RV32IXQCCMP-NEXT: .cfi_offset s10, -48 |
| ; RV32IXQCCMP-NEXT: .cfi_offset s11, -52 |
| ; RV32IXQCCMP-NEXT: lui t0, %hi(var_test_irq) |
| ; RV32IXQCCMP-NEXT: lw a0, %lo(var_test_irq)(t0) |
| ; RV32IXQCCMP-NEXT: sw a0, 28(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-NEXT: lw a0, %lo(var_test_irq+4)(t0) |
| ; RV32IXQCCMP-NEXT: sw a0, 24(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-NEXT: lw a0, %lo(var_test_irq+8)(t0) |
| ; RV32IXQCCMP-NEXT: sw a0, 20(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-NEXT: lw a0, %lo(var_test_irq+12)(t0) |
| ; RV32IXQCCMP-NEXT: sw a0, 16(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-NEXT: addi a5, t0, %lo(var_test_irq) |
| ; RV32IXQCCMP-NEXT: lw a0, 16(a5) |
| ; RV32IXQCCMP-NEXT: sw a0, 12(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-NEXT: lw a0, 20(a5) |
| ; RV32IXQCCMP-NEXT: sw a0, 8(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-NEXT: lw t4, 24(a5) |
| ; RV32IXQCCMP-NEXT: lw t5, 28(a5) |
| ; RV32IXQCCMP-NEXT: lw t6, 32(a5) |
| ; RV32IXQCCMP-NEXT: lw s2, 36(a5) |
| ; RV32IXQCCMP-NEXT: lw s3, 40(a5) |
| ; RV32IXQCCMP-NEXT: lw s4, 44(a5) |
| ; RV32IXQCCMP-NEXT: lw s5, 48(a5) |
| ; RV32IXQCCMP-NEXT: lw s6, 52(a5) |
| ; RV32IXQCCMP-NEXT: lw s7, 56(a5) |
| ; RV32IXQCCMP-NEXT: lw s8, 60(a5) |
| ; RV32IXQCCMP-NEXT: lw s9, 64(a5) |
| ; RV32IXQCCMP-NEXT: lw s10, 68(a5) |
| ; RV32IXQCCMP-NEXT: lw s11, 72(a5) |
| ; RV32IXQCCMP-NEXT: lw ra, 76(a5) |
| ; RV32IXQCCMP-NEXT: lw s1, 80(a5) |
| ; RV32IXQCCMP-NEXT: lw t3, 84(a5) |
| ; RV32IXQCCMP-NEXT: lw t2, 88(a5) |
| ; RV32IXQCCMP-NEXT: lw t1, 92(a5) |
| ; RV32IXQCCMP-NEXT: lw a7, 112(a5) |
| ; RV32IXQCCMP-NEXT: lw s0, 116(a5) |
| ; RV32IXQCCMP-NEXT: lw a3, 120(a5) |
| ; RV32IXQCCMP-NEXT: lw a0, 124(a5) |
| ; RV32IXQCCMP-NEXT: lw a6, 96(a5) |
| ; RV32IXQCCMP-NEXT: lw a4, 100(a5) |
| ; RV32IXQCCMP-NEXT: lw a2, 104(a5) |
| ; RV32IXQCCMP-NEXT: lw a1, 108(a5) |
| ; RV32IXQCCMP-NEXT: sw a0, 124(a5) |
| ; RV32IXQCCMP-NEXT: sw a3, 120(a5) |
| ; RV32IXQCCMP-NEXT: sw s0, 116(a5) |
| ; RV32IXQCCMP-NEXT: sw a7, 112(a5) |
| ; RV32IXQCCMP-NEXT: sw a1, 108(a5) |
| ; RV32IXQCCMP-NEXT: sw a2, 104(a5) |
| ; RV32IXQCCMP-NEXT: sw a4, 100(a5) |
| ; RV32IXQCCMP-NEXT: sw a6, 96(a5) |
| ; RV32IXQCCMP-NEXT: sw t1, 92(a5) |
| ; RV32IXQCCMP-NEXT: sw t2, 88(a5) |
| ; RV32IXQCCMP-NEXT: sw t3, 84(a5) |
| ; RV32IXQCCMP-NEXT: sw s1, 80(a5) |
| ; RV32IXQCCMP-NEXT: sw ra, 76(a5) |
| ; RV32IXQCCMP-NEXT: sw s11, 72(a5) |
| ; RV32IXQCCMP-NEXT: sw s10, 68(a5) |
| ; RV32IXQCCMP-NEXT: sw s9, 64(a5) |
| ; RV32IXQCCMP-NEXT: sw s8, 60(a5) |
| ; RV32IXQCCMP-NEXT: sw s7, 56(a5) |
| ; RV32IXQCCMP-NEXT: sw s6, 52(a5) |
| ; RV32IXQCCMP-NEXT: sw s5, 48(a5) |
| ; RV32IXQCCMP-NEXT: sw s4, 44(a5) |
| ; RV32IXQCCMP-NEXT: sw s3, 40(a5) |
| ; RV32IXQCCMP-NEXT: sw s2, 36(a5) |
| ; RV32IXQCCMP-NEXT: sw t6, 32(a5) |
| ; RV32IXQCCMP-NEXT: sw t5, 28(a5) |
| ; RV32IXQCCMP-NEXT: sw t4, 24(a5) |
| ; RV32IXQCCMP-NEXT: lw a0, 8(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-NEXT: sw a0, 20(a5) |
| ; RV32IXQCCMP-NEXT: lw a0, 12(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-NEXT: sw a0, 16(a5) |
| ; RV32IXQCCMP-NEXT: lw a0, 16(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-NEXT: sw a0, %lo(var_test_irq+12)(t0) |
| ; RV32IXQCCMP-NEXT: lw a0, 20(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-NEXT: sw a0, %lo(var_test_irq+8)(t0) |
| ; RV32IXQCCMP-NEXT: lw a0, 24(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-NEXT: sw a0, %lo(var_test_irq+4)(t0) |
| ; RV32IXQCCMP-NEXT: lw a0, 28(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-NEXT: sw a0, %lo(var_test_irq)(t0) |
| ; RV32IXQCCMP-NEXT: qc.cm.popret {ra, s0-s11}, 96 |
| ; |
| ; RV64IXQCCMP-LABEL: callee_no_irq: |
| ; RV64IXQCCMP: # %bb.0: |
| ; RV64IXQCCMP-NEXT: qc.cm.push {ra, s0-s11}, -160 |
| ; RV64IXQCCMP-NEXT: .cfi_def_cfa_offset 160 |
| ; RV64IXQCCMP-NEXT: .cfi_offset ra, -8 |
| ; RV64IXQCCMP-NEXT: .cfi_offset s0, -16 |
| ; RV64IXQCCMP-NEXT: .cfi_offset s1, -24 |
| ; RV64IXQCCMP-NEXT: .cfi_offset s2, -32 |
| ; RV64IXQCCMP-NEXT: .cfi_offset s3, -40 |
| ; RV64IXQCCMP-NEXT: .cfi_offset s4, -48 |
| ; RV64IXQCCMP-NEXT: .cfi_offset s5, -56 |
| ; RV64IXQCCMP-NEXT: .cfi_offset s6, -64 |
| ; RV64IXQCCMP-NEXT: .cfi_offset s7, -72 |
| ; RV64IXQCCMP-NEXT: .cfi_offset s8, -80 |
| ; RV64IXQCCMP-NEXT: .cfi_offset s9, -88 |
| ; RV64IXQCCMP-NEXT: .cfi_offset s10, -96 |
| ; RV64IXQCCMP-NEXT: .cfi_offset s11, -104 |
| ; RV64IXQCCMP-NEXT: lui t0, %hi(var_test_irq) |
| ; RV64IXQCCMP-NEXT: lw a0, %lo(var_test_irq)(t0) |
| ; RV64IXQCCMP-NEXT: sd a0, 40(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-NEXT: lw a0, %lo(var_test_irq+4)(t0) |
| ; RV64IXQCCMP-NEXT: sd a0, 32(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-NEXT: lw a0, %lo(var_test_irq+8)(t0) |
| ; RV64IXQCCMP-NEXT: sd a0, 24(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-NEXT: lw a0, %lo(var_test_irq+12)(t0) |
| ; RV64IXQCCMP-NEXT: sd a0, 16(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-NEXT: addi a5, t0, %lo(var_test_irq) |
| ; RV64IXQCCMP-NEXT: lw a0, 16(a5) |
| ; RV64IXQCCMP-NEXT: sd a0, 8(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-NEXT: lw a0, 20(a5) |
| ; RV64IXQCCMP-NEXT: sd a0, 0(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-NEXT: lw t4, 24(a5) |
| ; RV64IXQCCMP-NEXT: lw t5, 28(a5) |
| ; RV64IXQCCMP-NEXT: lw t6, 32(a5) |
| ; RV64IXQCCMP-NEXT: lw s2, 36(a5) |
| ; RV64IXQCCMP-NEXT: lw s3, 40(a5) |
| ; RV64IXQCCMP-NEXT: lw s4, 44(a5) |
| ; RV64IXQCCMP-NEXT: lw s5, 48(a5) |
| ; RV64IXQCCMP-NEXT: lw s6, 52(a5) |
| ; RV64IXQCCMP-NEXT: lw s7, 56(a5) |
| ; RV64IXQCCMP-NEXT: lw s8, 60(a5) |
| ; RV64IXQCCMP-NEXT: lw s9, 64(a5) |
| ; RV64IXQCCMP-NEXT: lw s10, 68(a5) |
| ; RV64IXQCCMP-NEXT: lw s11, 72(a5) |
| ; RV64IXQCCMP-NEXT: lw ra, 76(a5) |
| ; RV64IXQCCMP-NEXT: lw s1, 80(a5) |
| ; RV64IXQCCMP-NEXT: lw t3, 84(a5) |
| ; RV64IXQCCMP-NEXT: lw t2, 88(a5) |
| ; RV64IXQCCMP-NEXT: lw t1, 92(a5) |
| ; RV64IXQCCMP-NEXT: lw a7, 112(a5) |
| ; RV64IXQCCMP-NEXT: lw s0, 116(a5) |
| ; RV64IXQCCMP-NEXT: lw a3, 120(a5) |
| ; RV64IXQCCMP-NEXT: lw a0, 124(a5) |
| ; RV64IXQCCMP-NEXT: lw a6, 96(a5) |
| ; RV64IXQCCMP-NEXT: lw a4, 100(a5) |
| ; RV64IXQCCMP-NEXT: lw a2, 104(a5) |
| ; RV64IXQCCMP-NEXT: lw a1, 108(a5) |
| ; RV64IXQCCMP-NEXT: sw a0, 124(a5) |
| ; RV64IXQCCMP-NEXT: sw a3, 120(a5) |
| ; RV64IXQCCMP-NEXT: sw s0, 116(a5) |
| ; RV64IXQCCMP-NEXT: sw a7, 112(a5) |
| ; RV64IXQCCMP-NEXT: sw a1, 108(a5) |
| ; RV64IXQCCMP-NEXT: sw a2, 104(a5) |
| ; RV64IXQCCMP-NEXT: sw a4, 100(a5) |
| ; RV64IXQCCMP-NEXT: sw a6, 96(a5) |
| ; RV64IXQCCMP-NEXT: sw t1, 92(a5) |
| ; RV64IXQCCMP-NEXT: sw t2, 88(a5) |
| ; RV64IXQCCMP-NEXT: sw t3, 84(a5) |
| ; RV64IXQCCMP-NEXT: sw s1, 80(a5) |
| ; RV64IXQCCMP-NEXT: sw ra, 76(a5) |
| ; RV64IXQCCMP-NEXT: sw s11, 72(a5) |
| ; RV64IXQCCMP-NEXT: sw s10, 68(a5) |
| ; RV64IXQCCMP-NEXT: sw s9, 64(a5) |
| ; RV64IXQCCMP-NEXT: sw s8, 60(a5) |
| ; RV64IXQCCMP-NEXT: sw s7, 56(a5) |
| ; RV64IXQCCMP-NEXT: sw s6, 52(a5) |
| ; RV64IXQCCMP-NEXT: sw s5, 48(a5) |
| ; RV64IXQCCMP-NEXT: sw s4, 44(a5) |
| ; RV64IXQCCMP-NEXT: sw s3, 40(a5) |
| ; RV64IXQCCMP-NEXT: sw s2, 36(a5) |
| ; RV64IXQCCMP-NEXT: sw t6, 32(a5) |
| ; RV64IXQCCMP-NEXT: sw t5, 28(a5) |
| ; RV64IXQCCMP-NEXT: sw t4, 24(a5) |
| ; RV64IXQCCMP-NEXT: ld a0, 0(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-NEXT: sw a0, 20(a5) |
| ; RV64IXQCCMP-NEXT: ld a0, 8(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-NEXT: sw a0, 16(a5) |
| ; RV64IXQCCMP-NEXT: ld a0, 16(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-NEXT: sw a0, %lo(var_test_irq+12)(t0) |
| ; RV64IXQCCMP-NEXT: ld a0, 24(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-NEXT: sw a0, %lo(var_test_irq+8)(t0) |
| ; RV64IXQCCMP-NEXT: ld a0, 32(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-NEXT: sw a0, %lo(var_test_irq+4)(t0) |
| ; RV64IXQCCMP-NEXT: ld a0, 40(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-NEXT: sw a0, %lo(var_test_irq)(t0) |
| ; RV64IXQCCMP-NEXT: qc.cm.popret {ra, s0-s11}, 160 |
| ; |
| ; RV32IXQCCMP-FP-LABEL: callee_no_irq: |
| ; RV32IXQCCMP-FP: # %bb.0: |
| ; RV32IXQCCMP-FP-NEXT: qc.cm.pushfp {ra, s0-s11}, -96 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 96 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset ra, -4 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset s0, -8 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset s1, -12 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset s2, -16 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset s3, -20 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset s4, -24 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset s5, -28 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset s6, -32 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset s7, -36 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset s8, -40 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset s9, -44 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset s10, -48 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset s11, -52 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa s0, 0 |
| ; RV32IXQCCMP-FP-NEXT: lui t1, %hi(var_test_irq) |
| ; RV32IXQCCMP-FP-NEXT: lw a0, %lo(var_test_irq)(t1) |
| ; RV32IXQCCMP-FP-NEXT: sw a0, -68(s0) # 4-byte Folded Spill |
| ; RV32IXQCCMP-FP-NEXT: lw a0, %lo(var_test_irq+4)(t1) |
| ; RV32IXQCCMP-FP-NEXT: sw a0, -72(s0) # 4-byte Folded Spill |
| ; RV32IXQCCMP-FP-NEXT: lw a0, %lo(var_test_irq+8)(t1) |
| ; RV32IXQCCMP-FP-NEXT: sw a0, -76(s0) # 4-byte Folded Spill |
| ; RV32IXQCCMP-FP-NEXT: lw a0, %lo(var_test_irq+12)(t1) |
| ; RV32IXQCCMP-FP-NEXT: sw a0, -80(s0) # 4-byte Folded Spill |
| ; RV32IXQCCMP-FP-NEXT: addi a5, t1, %lo(var_test_irq) |
| ; RV32IXQCCMP-FP-NEXT: lw a0, 16(a5) |
| ; RV32IXQCCMP-FP-NEXT: sw a0, -84(s0) # 4-byte Folded Spill |
| ; RV32IXQCCMP-FP-NEXT: lw a0, 20(a5) |
| ; RV32IXQCCMP-FP-NEXT: sw a0, -88(s0) # 4-byte Folded Spill |
| ; RV32IXQCCMP-FP-NEXT: lw a0, 24(a5) |
| ; RV32IXQCCMP-FP-NEXT: sw a0, -92(s0) # 4-byte Folded Spill |
| ; RV32IXQCCMP-FP-NEXT: lw t5, 28(a5) |
| ; RV32IXQCCMP-FP-NEXT: lw t6, 32(a5) |
| ; RV32IXQCCMP-FP-NEXT: lw s2, 36(a5) |
| ; RV32IXQCCMP-FP-NEXT: lw s3, 40(a5) |
| ; RV32IXQCCMP-FP-NEXT: lw s4, 44(a5) |
| ; RV32IXQCCMP-FP-NEXT: lw s5, 48(a5) |
| ; RV32IXQCCMP-FP-NEXT: lw s6, 52(a5) |
| ; RV32IXQCCMP-FP-NEXT: lw s7, 56(a5) |
| ; RV32IXQCCMP-FP-NEXT: lw s8, 60(a5) |
| ; RV32IXQCCMP-FP-NEXT: lw s9, 64(a5) |
| ; RV32IXQCCMP-FP-NEXT: lw s10, 68(a5) |
| ; RV32IXQCCMP-FP-NEXT: lw s11, 72(a5) |
| ; RV32IXQCCMP-FP-NEXT: lw ra, 76(a5) |
| ; RV32IXQCCMP-FP-NEXT: lw t4, 80(a5) |
| ; RV32IXQCCMP-FP-NEXT: lw t3, 84(a5) |
| ; RV32IXQCCMP-FP-NEXT: lw t2, 88(a5) |
| ; RV32IXQCCMP-FP-NEXT: lw s1, 92(a5) |
| ; RV32IXQCCMP-FP-NEXT: lw t0, 112(a5) |
| ; RV32IXQCCMP-FP-NEXT: lw a4, 116(a5) |
| ; RV32IXQCCMP-FP-NEXT: lw a3, 120(a5) |
| ; RV32IXQCCMP-FP-NEXT: lw a0, 124(a5) |
| ; RV32IXQCCMP-FP-NEXT: lw a7, 96(a5) |
| ; RV32IXQCCMP-FP-NEXT: lw a6, 100(a5) |
| ; RV32IXQCCMP-FP-NEXT: lw a2, 104(a5) |
| ; RV32IXQCCMP-FP-NEXT: lw a1, 108(a5) |
| ; RV32IXQCCMP-FP-NEXT: sw a0, 124(a5) |
| ; RV32IXQCCMP-FP-NEXT: sw a3, 120(a5) |
| ; RV32IXQCCMP-FP-NEXT: sw a4, 116(a5) |
| ; RV32IXQCCMP-FP-NEXT: sw t0, 112(a5) |
| ; RV32IXQCCMP-FP-NEXT: sw a1, 108(a5) |
| ; RV32IXQCCMP-FP-NEXT: sw a2, 104(a5) |
| ; RV32IXQCCMP-FP-NEXT: sw a6, 100(a5) |
| ; RV32IXQCCMP-FP-NEXT: sw a7, 96(a5) |
| ; RV32IXQCCMP-FP-NEXT: sw s1, 92(a5) |
| ; RV32IXQCCMP-FP-NEXT: sw t2, 88(a5) |
| ; RV32IXQCCMP-FP-NEXT: sw t3, 84(a5) |
| ; RV32IXQCCMP-FP-NEXT: sw t4, 80(a5) |
| ; RV32IXQCCMP-FP-NEXT: sw ra, 76(a5) |
| ; RV32IXQCCMP-FP-NEXT: sw s11, 72(a5) |
| ; RV32IXQCCMP-FP-NEXT: sw s10, 68(a5) |
| ; RV32IXQCCMP-FP-NEXT: sw s9, 64(a5) |
| ; RV32IXQCCMP-FP-NEXT: sw s8, 60(a5) |
| ; RV32IXQCCMP-FP-NEXT: sw s7, 56(a5) |
| ; RV32IXQCCMP-FP-NEXT: sw s6, 52(a5) |
| ; RV32IXQCCMP-FP-NEXT: sw s5, 48(a5) |
| ; RV32IXQCCMP-FP-NEXT: sw s4, 44(a5) |
| ; RV32IXQCCMP-FP-NEXT: sw s3, 40(a5) |
| ; RV32IXQCCMP-FP-NEXT: sw s2, 36(a5) |
| ; RV32IXQCCMP-FP-NEXT: sw t6, 32(a5) |
| ; RV32IXQCCMP-FP-NEXT: sw t5, 28(a5) |
| ; RV32IXQCCMP-FP-NEXT: lw a0, -92(s0) # 4-byte Folded Reload |
| ; RV32IXQCCMP-FP-NEXT: sw a0, 24(a5) |
| ; RV32IXQCCMP-FP-NEXT: lw a0, -88(s0) # 4-byte Folded Reload |
| ; RV32IXQCCMP-FP-NEXT: sw a0, 20(a5) |
| ; RV32IXQCCMP-FP-NEXT: lw a0, -84(s0) # 4-byte Folded Reload |
| ; RV32IXQCCMP-FP-NEXT: sw a0, 16(a5) |
| ; RV32IXQCCMP-FP-NEXT: lw a0, -80(s0) # 4-byte Folded Reload |
| ; RV32IXQCCMP-FP-NEXT: sw a0, %lo(var_test_irq+12)(t1) |
| ; RV32IXQCCMP-FP-NEXT: lw a0, -76(s0) # 4-byte Folded Reload |
| ; RV32IXQCCMP-FP-NEXT: sw a0, %lo(var_test_irq+8)(t1) |
| ; RV32IXQCCMP-FP-NEXT: lw a0, -72(s0) # 4-byte Folded Reload |
| ; RV32IXQCCMP-FP-NEXT: sw a0, %lo(var_test_irq+4)(t1) |
| ; RV32IXQCCMP-FP-NEXT: lw a0, -68(s0) # 4-byte Folded Reload |
| ; RV32IXQCCMP-FP-NEXT: sw a0, %lo(var_test_irq)(t1) |
| ; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa sp, 96 |
| ; RV32IXQCCMP-FP-NEXT: qc.cm.popret {ra, s0-s11}, 96 |
| ; |
| ; RV64IXQCCMP-FP-LABEL: callee_no_irq: |
| ; RV64IXQCCMP-FP: # %bb.0: |
| ; RV64IXQCCMP-FP-NEXT: qc.cm.pushfp {ra, s0-s11}, -160 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 160 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset ra, -8 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset s0, -16 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset s1, -24 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset s2, -32 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset s3, -40 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset s4, -48 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset s5, -56 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset s6, -64 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset s7, -72 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset s8, -80 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset s9, -88 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset s10, -96 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset s11, -104 |
| ; RV64IXQCCMP-FP-NEXT: addi sp, sp, -16 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 176 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa s0, 0 |
| ; RV64IXQCCMP-FP-NEXT: lui t1, %hi(var_test_irq) |
| ; RV64IXQCCMP-FP-NEXT: lw a0, %lo(var_test_irq)(t1) |
| ; RV64IXQCCMP-FP-NEXT: sd a0, -120(s0) # 8-byte Folded Spill |
| ; RV64IXQCCMP-FP-NEXT: lw a0, %lo(var_test_irq+4)(t1) |
| ; RV64IXQCCMP-FP-NEXT: sd a0, -128(s0) # 8-byte Folded Spill |
| ; RV64IXQCCMP-FP-NEXT: lw a0, %lo(var_test_irq+8)(t1) |
| ; RV64IXQCCMP-FP-NEXT: sd a0, -136(s0) # 8-byte Folded Spill |
| ; RV64IXQCCMP-FP-NEXT: lw a0, %lo(var_test_irq+12)(t1) |
| ; RV64IXQCCMP-FP-NEXT: sd a0, -144(s0) # 8-byte Folded Spill |
| ; RV64IXQCCMP-FP-NEXT: addi a5, t1, %lo(var_test_irq) |
| ; RV64IXQCCMP-FP-NEXT: lw a0, 16(a5) |
| ; RV64IXQCCMP-FP-NEXT: sd a0, -152(s0) # 8-byte Folded Spill |
| ; RV64IXQCCMP-FP-NEXT: lw a0, 20(a5) |
| ; RV64IXQCCMP-FP-NEXT: sd a0, -160(s0) # 8-byte Folded Spill |
| ; RV64IXQCCMP-FP-NEXT: lw a0, 24(a5) |
| ; RV64IXQCCMP-FP-NEXT: sd a0, -168(s0) # 8-byte Folded Spill |
| ; RV64IXQCCMP-FP-NEXT: lw t5, 28(a5) |
| ; RV64IXQCCMP-FP-NEXT: lw t6, 32(a5) |
| ; RV64IXQCCMP-FP-NEXT: lw s2, 36(a5) |
| ; RV64IXQCCMP-FP-NEXT: lw s3, 40(a5) |
| ; RV64IXQCCMP-FP-NEXT: lw s4, 44(a5) |
| ; RV64IXQCCMP-FP-NEXT: lw s5, 48(a5) |
| ; RV64IXQCCMP-FP-NEXT: lw s6, 52(a5) |
| ; RV64IXQCCMP-FP-NEXT: lw s7, 56(a5) |
| ; RV64IXQCCMP-FP-NEXT: lw s8, 60(a5) |
| ; RV64IXQCCMP-FP-NEXT: lw s9, 64(a5) |
| ; RV64IXQCCMP-FP-NEXT: lw s10, 68(a5) |
| ; RV64IXQCCMP-FP-NEXT: lw s11, 72(a5) |
| ; RV64IXQCCMP-FP-NEXT: lw ra, 76(a5) |
| ; RV64IXQCCMP-FP-NEXT: lw t4, 80(a5) |
| ; RV64IXQCCMP-FP-NEXT: lw t3, 84(a5) |
| ; RV64IXQCCMP-FP-NEXT: lw t2, 88(a5) |
| ; RV64IXQCCMP-FP-NEXT: lw s1, 92(a5) |
| ; RV64IXQCCMP-FP-NEXT: lw t0, 112(a5) |
| ; RV64IXQCCMP-FP-NEXT: lw a4, 116(a5) |
| ; RV64IXQCCMP-FP-NEXT: lw a3, 120(a5) |
| ; RV64IXQCCMP-FP-NEXT: lw a0, 124(a5) |
| ; RV64IXQCCMP-FP-NEXT: lw a7, 96(a5) |
| ; RV64IXQCCMP-FP-NEXT: lw a6, 100(a5) |
| ; RV64IXQCCMP-FP-NEXT: lw a2, 104(a5) |
| ; RV64IXQCCMP-FP-NEXT: lw a1, 108(a5) |
| ; RV64IXQCCMP-FP-NEXT: sw a0, 124(a5) |
| ; RV64IXQCCMP-FP-NEXT: sw a3, 120(a5) |
| ; RV64IXQCCMP-FP-NEXT: sw a4, 116(a5) |
| ; RV64IXQCCMP-FP-NEXT: sw t0, 112(a5) |
| ; RV64IXQCCMP-FP-NEXT: sw a1, 108(a5) |
| ; RV64IXQCCMP-FP-NEXT: sw a2, 104(a5) |
| ; RV64IXQCCMP-FP-NEXT: sw a6, 100(a5) |
| ; RV64IXQCCMP-FP-NEXT: sw a7, 96(a5) |
| ; RV64IXQCCMP-FP-NEXT: sw s1, 92(a5) |
| ; RV64IXQCCMP-FP-NEXT: sw t2, 88(a5) |
| ; RV64IXQCCMP-FP-NEXT: sw t3, 84(a5) |
| ; RV64IXQCCMP-FP-NEXT: sw t4, 80(a5) |
| ; RV64IXQCCMP-FP-NEXT: sw ra, 76(a5) |
| ; RV64IXQCCMP-FP-NEXT: sw s11, 72(a5) |
| ; RV64IXQCCMP-FP-NEXT: sw s10, 68(a5) |
| ; RV64IXQCCMP-FP-NEXT: sw s9, 64(a5) |
| ; RV64IXQCCMP-FP-NEXT: sw s8, 60(a5) |
| ; RV64IXQCCMP-FP-NEXT: sw s7, 56(a5) |
| ; RV64IXQCCMP-FP-NEXT: sw s6, 52(a5) |
| ; RV64IXQCCMP-FP-NEXT: sw s5, 48(a5) |
| ; RV64IXQCCMP-FP-NEXT: sw s4, 44(a5) |
| ; RV64IXQCCMP-FP-NEXT: sw s3, 40(a5) |
| ; RV64IXQCCMP-FP-NEXT: sw s2, 36(a5) |
| ; RV64IXQCCMP-FP-NEXT: sw t6, 32(a5) |
| ; RV64IXQCCMP-FP-NEXT: sw t5, 28(a5) |
| ; RV64IXQCCMP-FP-NEXT: ld a0, -168(s0) # 8-byte Folded Reload |
| ; RV64IXQCCMP-FP-NEXT: sw a0, 24(a5) |
| ; RV64IXQCCMP-FP-NEXT: ld a0, -160(s0) # 8-byte Folded Reload |
| ; RV64IXQCCMP-FP-NEXT: sw a0, 20(a5) |
| ; RV64IXQCCMP-FP-NEXT: ld a0, -152(s0) # 8-byte Folded Reload |
| ; RV64IXQCCMP-FP-NEXT: sw a0, 16(a5) |
| ; RV64IXQCCMP-FP-NEXT: ld a0, -144(s0) # 8-byte Folded Reload |
| ; RV64IXQCCMP-FP-NEXT: sw a0, %lo(var_test_irq+12)(t1) |
| ; RV64IXQCCMP-FP-NEXT: ld a0, -136(s0) # 8-byte Folded Reload |
| ; RV64IXQCCMP-FP-NEXT: sw a0, %lo(var_test_irq+8)(t1) |
| ; RV64IXQCCMP-FP-NEXT: ld a0, -128(s0) # 8-byte Folded Reload |
| ; RV64IXQCCMP-FP-NEXT: sw a0, %lo(var_test_irq+4)(t1) |
| ; RV64IXQCCMP-FP-NEXT: ld a0, -120(s0) # 8-byte Folded Reload |
| ; RV64IXQCCMP-FP-NEXT: sw a0, %lo(var_test_irq)(t1) |
| ; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa sp, 176 |
| ; RV64IXQCCMP-FP-NEXT: addi sp, sp, 16 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 160 |
| ; RV64IXQCCMP-FP-NEXT: qc.cm.popret {ra, s0-s11}, 160 |
| ; |
| ; RV32IXQCCMP-SR-LABEL: callee_no_irq: |
| ; RV32IXQCCMP-SR: # %bb.0: |
| ; RV32IXQCCMP-SR-NEXT: qc.cm.push {ra, s0-s11}, -96 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 96 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset ra, -4 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset s0, -8 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset s1, -12 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset s2, -16 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset s3, -20 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset s4, -24 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset s5, -28 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset s6, -32 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset s7, -36 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset s8, -40 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset s9, -44 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset s10, -48 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset s11, -52 |
| ; RV32IXQCCMP-SR-NEXT: lui t0, %hi(var_test_irq) |
| ; RV32IXQCCMP-SR-NEXT: lw a0, %lo(var_test_irq)(t0) |
| ; RV32IXQCCMP-SR-NEXT: sw a0, 28(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-SR-NEXT: lw a0, %lo(var_test_irq+4)(t0) |
| ; RV32IXQCCMP-SR-NEXT: sw a0, 24(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-SR-NEXT: lw a0, %lo(var_test_irq+8)(t0) |
| ; RV32IXQCCMP-SR-NEXT: sw a0, 20(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-SR-NEXT: lw a0, %lo(var_test_irq+12)(t0) |
| ; RV32IXQCCMP-SR-NEXT: sw a0, 16(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-SR-NEXT: addi a5, t0, %lo(var_test_irq) |
| ; RV32IXQCCMP-SR-NEXT: lw a0, 16(a5) |
| ; RV32IXQCCMP-SR-NEXT: sw a0, 12(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-SR-NEXT: lw a0, 20(a5) |
| ; RV32IXQCCMP-SR-NEXT: sw a0, 8(sp) # 4-byte Folded Spill |
| ; RV32IXQCCMP-SR-NEXT: lw t4, 24(a5) |
| ; RV32IXQCCMP-SR-NEXT: lw t5, 28(a5) |
| ; RV32IXQCCMP-SR-NEXT: lw t6, 32(a5) |
| ; RV32IXQCCMP-SR-NEXT: lw s2, 36(a5) |
| ; RV32IXQCCMP-SR-NEXT: lw s3, 40(a5) |
| ; RV32IXQCCMP-SR-NEXT: lw s4, 44(a5) |
| ; RV32IXQCCMP-SR-NEXT: lw s5, 48(a5) |
| ; RV32IXQCCMP-SR-NEXT: lw s6, 52(a5) |
| ; RV32IXQCCMP-SR-NEXT: lw s7, 56(a5) |
| ; RV32IXQCCMP-SR-NEXT: lw s8, 60(a5) |
| ; RV32IXQCCMP-SR-NEXT: lw s9, 64(a5) |
| ; RV32IXQCCMP-SR-NEXT: lw s10, 68(a5) |
| ; RV32IXQCCMP-SR-NEXT: lw s11, 72(a5) |
| ; RV32IXQCCMP-SR-NEXT: lw ra, 76(a5) |
| ; RV32IXQCCMP-SR-NEXT: lw s1, 80(a5) |
| ; RV32IXQCCMP-SR-NEXT: lw t3, 84(a5) |
| ; RV32IXQCCMP-SR-NEXT: lw t2, 88(a5) |
| ; RV32IXQCCMP-SR-NEXT: lw t1, 92(a5) |
| ; RV32IXQCCMP-SR-NEXT: lw a7, 112(a5) |
| ; RV32IXQCCMP-SR-NEXT: lw s0, 116(a5) |
| ; RV32IXQCCMP-SR-NEXT: lw a3, 120(a5) |
| ; RV32IXQCCMP-SR-NEXT: lw a0, 124(a5) |
| ; RV32IXQCCMP-SR-NEXT: lw a6, 96(a5) |
| ; RV32IXQCCMP-SR-NEXT: lw a4, 100(a5) |
| ; RV32IXQCCMP-SR-NEXT: lw a2, 104(a5) |
| ; RV32IXQCCMP-SR-NEXT: lw a1, 108(a5) |
| ; RV32IXQCCMP-SR-NEXT: sw a0, 124(a5) |
| ; RV32IXQCCMP-SR-NEXT: sw a3, 120(a5) |
| ; RV32IXQCCMP-SR-NEXT: sw s0, 116(a5) |
| ; RV32IXQCCMP-SR-NEXT: sw a7, 112(a5) |
| ; RV32IXQCCMP-SR-NEXT: sw a1, 108(a5) |
| ; RV32IXQCCMP-SR-NEXT: sw a2, 104(a5) |
| ; RV32IXQCCMP-SR-NEXT: sw a4, 100(a5) |
| ; RV32IXQCCMP-SR-NEXT: sw a6, 96(a5) |
| ; RV32IXQCCMP-SR-NEXT: sw t1, 92(a5) |
| ; RV32IXQCCMP-SR-NEXT: sw t2, 88(a5) |
| ; RV32IXQCCMP-SR-NEXT: sw t3, 84(a5) |
| ; RV32IXQCCMP-SR-NEXT: sw s1, 80(a5) |
| ; RV32IXQCCMP-SR-NEXT: sw ra, 76(a5) |
| ; RV32IXQCCMP-SR-NEXT: sw s11, 72(a5) |
| ; RV32IXQCCMP-SR-NEXT: sw s10, 68(a5) |
| ; RV32IXQCCMP-SR-NEXT: sw s9, 64(a5) |
| ; RV32IXQCCMP-SR-NEXT: sw s8, 60(a5) |
| ; RV32IXQCCMP-SR-NEXT: sw s7, 56(a5) |
| ; RV32IXQCCMP-SR-NEXT: sw s6, 52(a5) |
| ; RV32IXQCCMP-SR-NEXT: sw s5, 48(a5) |
| ; RV32IXQCCMP-SR-NEXT: sw s4, 44(a5) |
| ; RV32IXQCCMP-SR-NEXT: sw s3, 40(a5) |
| ; RV32IXQCCMP-SR-NEXT: sw s2, 36(a5) |
| ; RV32IXQCCMP-SR-NEXT: sw t6, 32(a5) |
| ; RV32IXQCCMP-SR-NEXT: sw t5, 28(a5) |
| ; RV32IXQCCMP-SR-NEXT: sw t4, 24(a5) |
| ; RV32IXQCCMP-SR-NEXT: lw a0, 8(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-SR-NEXT: sw a0, 20(a5) |
| ; RV32IXQCCMP-SR-NEXT: lw a0, 12(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-SR-NEXT: sw a0, 16(a5) |
| ; RV32IXQCCMP-SR-NEXT: lw a0, 16(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-SR-NEXT: sw a0, %lo(var_test_irq+12)(t0) |
| ; RV32IXQCCMP-SR-NEXT: lw a0, 20(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-SR-NEXT: sw a0, %lo(var_test_irq+8)(t0) |
| ; RV32IXQCCMP-SR-NEXT: lw a0, 24(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-SR-NEXT: sw a0, %lo(var_test_irq+4)(t0) |
| ; RV32IXQCCMP-SR-NEXT: lw a0, 28(sp) # 4-byte Folded Reload |
| ; RV32IXQCCMP-SR-NEXT: sw a0, %lo(var_test_irq)(t0) |
| ; RV32IXQCCMP-SR-NEXT: qc.cm.popret {ra, s0-s11}, 96 |
| ; |
| ; RV64IXQCCMP-SR-LABEL: callee_no_irq: |
| ; RV64IXQCCMP-SR: # %bb.0: |
| ; RV64IXQCCMP-SR-NEXT: qc.cm.push {ra, s0-s11}, -160 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 160 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset ra, -8 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset s0, -16 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset s1, -24 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset s2, -32 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset s3, -40 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset s4, -48 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset s5, -56 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset s6, -64 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset s7, -72 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset s8, -80 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset s9, -88 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset s10, -96 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset s11, -104 |
| ; RV64IXQCCMP-SR-NEXT: lui t0, %hi(var_test_irq) |
| ; RV64IXQCCMP-SR-NEXT: lw a0, %lo(var_test_irq)(t0) |
| ; RV64IXQCCMP-SR-NEXT: sd a0, 40(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-SR-NEXT: lw a0, %lo(var_test_irq+4)(t0) |
| ; RV64IXQCCMP-SR-NEXT: sd a0, 32(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-SR-NEXT: lw a0, %lo(var_test_irq+8)(t0) |
| ; RV64IXQCCMP-SR-NEXT: sd a0, 24(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-SR-NEXT: lw a0, %lo(var_test_irq+12)(t0) |
| ; RV64IXQCCMP-SR-NEXT: sd a0, 16(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-SR-NEXT: addi a5, t0, %lo(var_test_irq) |
| ; RV64IXQCCMP-SR-NEXT: lw a0, 16(a5) |
| ; RV64IXQCCMP-SR-NEXT: sd a0, 8(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-SR-NEXT: lw a0, 20(a5) |
| ; RV64IXQCCMP-SR-NEXT: sd a0, 0(sp) # 8-byte Folded Spill |
| ; RV64IXQCCMP-SR-NEXT: lw t4, 24(a5) |
| ; RV64IXQCCMP-SR-NEXT: lw t5, 28(a5) |
| ; RV64IXQCCMP-SR-NEXT: lw t6, 32(a5) |
| ; RV64IXQCCMP-SR-NEXT: lw s2, 36(a5) |
| ; RV64IXQCCMP-SR-NEXT: lw s3, 40(a5) |
| ; RV64IXQCCMP-SR-NEXT: lw s4, 44(a5) |
| ; RV64IXQCCMP-SR-NEXT: lw s5, 48(a5) |
| ; RV64IXQCCMP-SR-NEXT: lw s6, 52(a5) |
| ; RV64IXQCCMP-SR-NEXT: lw s7, 56(a5) |
| ; RV64IXQCCMP-SR-NEXT: lw s8, 60(a5) |
| ; RV64IXQCCMP-SR-NEXT: lw s9, 64(a5) |
| ; RV64IXQCCMP-SR-NEXT: lw s10, 68(a5) |
| ; RV64IXQCCMP-SR-NEXT: lw s11, 72(a5) |
| ; RV64IXQCCMP-SR-NEXT: lw ra, 76(a5) |
| ; RV64IXQCCMP-SR-NEXT: lw s1, 80(a5) |
| ; RV64IXQCCMP-SR-NEXT: lw t3, 84(a5) |
| ; RV64IXQCCMP-SR-NEXT: lw t2, 88(a5) |
| ; RV64IXQCCMP-SR-NEXT: lw t1, 92(a5) |
| ; RV64IXQCCMP-SR-NEXT: lw a7, 112(a5) |
| ; RV64IXQCCMP-SR-NEXT: lw s0, 116(a5) |
| ; RV64IXQCCMP-SR-NEXT: lw a3, 120(a5) |
| ; RV64IXQCCMP-SR-NEXT: lw a0, 124(a5) |
| ; RV64IXQCCMP-SR-NEXT: lw a6, 96(a5) |
| ; RV64IXQCCMP-SR-NEXT: lw a4, 100(a5) |
| ; RV64IXQCCMP-SR-NEXT: lw a2, 104(a5) |
| ; RV64IXQCCMP-SR-NEXT: lw a1, 108(a5) |
| ; RV64IXQCCMP-SR-NEXT: sw a0, 124(a5) |
| ; RV64IXQCCMP-SR-NEXT: sw a3, 120(a5) |
| ; RV64IXQCCMP-SR-NEXT: sw s0, 116(a5) |
| ; RV64IXQCCMP-SR-NEXT: sw a7, 112(a5) |
| ; RV64IXQCCMP-SR-NEXT: sw a1, 108(a5) |
| ; RV64IXQCCMP-SR-NEXT: sw a2, 104(a5) |
| ; RV64IXQCCMP-SR-NEXT: sw a4, 100(a5) |
| ; RV64IXQCCMP-SR-NEXT: sw a6, 96(a5) |
| ; RV64IXQCCMP-SR-NEXT: sw t1, 92(a5) |
| ; RV64IXQCCMP-SR-NEXT: sw t2, 88(a5) |
| ; RV64IXQCCMP-SR-NEXT: sw t3, 84(a5) |
| ; RV64IXQCCMP-SR-NEXT: sw s1, 80(a5) |
| ; RV64IXQCCMP-SR-NEXT: sw ra, 76(a5) |
| ; RV64IXQCCMP-SR-NEXT: sw s11, 72(a5) |
| ; RV64IXQCCMP-SR-NEXT: sw s10, 68(a5) |
| ; RV64IXQCCMP-SR-NEXT: sw s9, 64(a5) |
| ; RV64IXQCCMP-SR-NEXT: sw s8, 60(a5) |
| ; RV64IXQCCMP-SR-NEXT: sw s7, 56(a5) |
| ; RV64IXQCCMP-SR-NEXT: sw s6, 52(a5) |
| ; RV64IXQCCMP-SR-NEXT: sw s5, 48(a5) |
| ; RV64IXQCCMP-SR-NEXT: sw s4, 44(a5) |
| ; RV64IXQCCMP-SR-NEXT: sw s3, 40(a5) |
| ; RV64IXQCCMP-SR-NEXT: sw s2, 36(a5) |
| ; RV64IXQCCMP-SR-NEXT: sw t6, 32(a5) |
| ; RV64IXQCCMP-SR-NEXT: sw t5, 28(a5) |
| ; RV64IXQCCMP-SR-NEXT: sw t4, 24(a5) |
| ; RV64IXQCCMP-SR-NEXT: ld a0, 0(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-SR-NEXT: sw a0, 20(a5) |
| ; RV64IXQCCMP-SR-NEXT: ld a0, 8(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-SR-NEXT: sw a0, 16(a5) |
| ; RV64IXQCCMP-SR-NEXT: ld a0, 16(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-SR-NEXT: sw a0, %lo(var_test_irq+12)(t0) |
| ; RV64IXQCCMP-SR-NEXT: ld a0, 24(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-SR-NEXT: sw a0, %lo(var_test_irq+8)(t0) |
| ; RV64IXQCCMP-SR-NEXT: ld a0, 32(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-SR-NEXT: sw a0, %lo(var_test_irq+4)(t0) |
| ; RV64IXQCCMP-SR-NEXT: ld a0, 40(sp) # 8-byte Folded Reload |
| ; RV64IXQCCMP-SR-NEXT: sw a0, %lo(var_test_irq)(t0) |
| ; RV64IXQCCMP-SR-NEXT: qc.cm.popret {ra, s0-s11}, 160 |
| %val = load [32 x i32], ptr @var_test_irq |
| store volatile [32 x i32] %val, ptr @var_test_irq |
| ret void |
| } |
| |
| declare void @bar(ptr, ptr) |
| declare ptr @llvm.frameaddress.p0(i32 immarg) |
| |
| ; use_fp: @llvm.frameaddress(0) forces a frame pointer, so every run line — |
| ; including the ones without -frame-pointer=all — must use qc.cm.pushfp |
| ; (which also sets s0), move the CFA to s0 while the frame is live, and |
| ; restore it to sp before the popret. The frame address (s0) and the local |
| ; %var are passed to @bar, and %x must survive the call (kept in s1). |
| define i32 @use_fp(i32 %x) { |
| ; RV32IXQCCMP-LABEL: use_fp: |
| ; RV32IXQCCMP: # %bb.0: # %entry |
| ; RV32IXQCCMP-NEXT: qc.cm.pushfp {ra, s0-s1}, -32 |
| ; RV32IXQCCMP-NEXT: .cfi_def_cfa_offset 32 |
| ; RV32IXQCCMP-NEXT: .cfi_offset ra, -4 |
| ; RV32IXQCCMP-NEXT: .cfi_offset s0, -8 |
| ; RV32IXQCCMP-NEXT: .cfi_offset s1, -12 |
| ; RV32IXQCCMP-NEXT: .cfi_def_cfa s0, 0 |
| ; RV32IXQCCMP-NEXT: mv s1, a0 |
| ; RV32IXQCCMP-NEXT: addi a1, s0, -20 |
| ; RV32IXQCCMP-NEXT: mv a0, s0 |
| ; RV32IXQCCMP-NEXT: call bar |
| ; RV32IXQCCMP-NEXT: mv a0, s1 |
| ; RV32IXQCCMP-NEXT: .cfi_def_cfa sp, 32 |
| ; RV32IXQCCMP-NEXT: qc.cm.popret {ra, s0-s1}, 32 |
| ; |
| ; RV64IXQCCMP-LABEL: use_fp: |
| ; RV64IXQCCMP: # %bb.0: # %entry |
| ; RV64IXQCCMP-NEXT: qc.cm.pushfp {ra, s0-s1}, -48 |
| ; RV64IXQCCMP-NEXT: .cfi_def_cfa_offset 48 |
| ; RV64IXQCCMP-NEXT: .cfi_offset ra, -8 |
| ; RV64IXQCCMP-NEXT: .cfi_offset s0, -16 |
| ; RV64IXQCCMP-NEXT: .cfi_offset s1, -24 |
| ; RV64IXQCCMP-NEXT: .cfi_def_cfa s0, 0 |
| ; RV64IXQCCMP-NEXT: mv s1, a0 |
| ; RV64IXQCCMP-NEXT: addi a1, s0, -36 |
| ; RV64IXQCCMP-NEXT: mv a0, s0 |
| ; RV64IXQCCMP-NEXT: call bar |
| ; RV64IXQCCMP-NEXT: mv a0, s1 |
| ; RV64IXQCCMP-NEXT: .cfi_def_cfa sp, 48 |
| ; RV64IXQCCMP-NEXT: qc.cm.popret {ra, s0-s1}, 48 |
| ; |
| ; RV32IXQCCMP-FP-LABEL: use_fp: |
| ; RV32IXQCCMP-FP: # %bb.0: # %entry |
| ; RV32IXQCCMP-FP-NEXT: qc.cm.pushfp {ra, s0-s1}, -32 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 32 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset ra, -4 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset s0, -8 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset s1, -12 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa s0, 0 |
| ; RV32IXQCCMP-FP-NEXT: mv s1, a0 |
| ; RV32IXQCCMP-FP-NEXT: addi a1, s0, -20 |
| ; RV32IXQCCMP-FP-NEXT: mv a0, s0 |
| ; RV32IXQCCMP-FP-NEXT: call bar |
| ; RV32IXQCCMP-FP-NEXT: mv a0, s1 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa sp, 32 |
| ; RV32IXQCCMP-FP-NEXT: qc.cm.popret {ra, s0-s1}, 32 |
| ; |
| ; RV64IXQCCMP-FP-LABEL: use_fp: |
| ; RV64IXQCCMP-FP: # %bb.0: # %entry |
| ; RV64IXQCCMP-FP-NEXT: qc.cm.pushfp {ra, s0-s1}, -48 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 48 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset ra, -8 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset s0, -16 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset s1, -24 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa s0, 0 |
| ; RV64IXQCCMP-FP-NEXT: mv s1, a0 |
| ; RV64IXQCCMP-FP-NEXT: addi a1, s0, -36 |
| ; RV64IXQCCMP-FP-NEXT: mv a0, s0 |
| ; RV64IXQCCMP-FP-NEXT: call bar |
| ; RV64IXQCCMP-FP-NEXT: mv a0, s1 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa sp, 48 |
| ; RV64IXQCCMP-FP-NEXT: qc.cm.popret {ra, s0-s1}, 48 |
| ; |
| ; RV32IXQCCMP-SR-LABEL: use_fp: |
| ; RV32IXQCCMP-SR: # %bb.0: # %entry |
| ; RV32IXQCCMP-SR-NEXT: qc.cm.pushfp {ra, s0-s1}, -32 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 32 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset ra, -4 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset s0, -8 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset s1, -12 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa s0, 0 |
| ; RV32IXQCCMP-SR-NEXT: mv s1, a0 |
| ; RV32IXQCCMP-SR-NEXT: addi a1, s0, -20 |
| ; RV32IXQCCMP-SR-NEXT: mv a0, s0 |
| ; RV32IXQCCMP-SR-NEXT: call bar |
| ; RV32IXQCCMP-SR-NEXT: mv a0, s1 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa sp, 32 |
| ; RV32IXQCCMP-SR-NEXT: qc.cm.popret {ra, s0-s1}, 32 |
| ; |
| ; RV64IXQCCMP-SR-LABEL: use_fp: |
| ; RV64IXQCCMP-SR: # %bb.0: # %entry |
| ; RV64IXQCCMP-SR-NEXT: qc.cm.pushfp {ra, s0-s1}, -48 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 48 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset ra, -8 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset s0, -16 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset s1, -24 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_def_cfa s0, 0 |
| ; RV64IXQCCMP-SR-NEXT: mv s1, a0 |
| ; RV64IXQCCMP-SR-NEXT: addi a1, s0, -36 |
| ; RV64IXQCCMP-SR-NEXT: mv a0, s0 |
| ; RV64IXQCCMP-SR-NEXT: call bar |
| ; RV64IXQCCMP-SR-NEXT: mv a0, s1 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_def_cfa sp, 48 |
| ; RV64IXQCCMP-SR-NEXT: qc.cm.popret {ra, s0-s1}, 48 |
| entry: |
| ; %0 is the current frame address; both it and the address of the stack |
| ; slot %var are handed to @bar, so a real frame must exist. |
| %var = alloca i32, align 4 |
| %0 = tail call ptr @llvm.frameaddress.p0(i32 0) |
| call void @bar(ptr %0, ptr %var) |
| ret i32 %x |
| } |
| |
| ; spill_x10: inline asm clobbers s10 only, yet the push/pop list is |
| ; {ra, s0-s11} in all configurations — register lists here are contiguous |
| ; and (as in Zcmp) presumably cannot end at s10, so saving s10 pulls in s11 |
| ; as well (TODO confirm against the Xqccmp spec). Note only the actually |
| ; clobbered s10/s11 get .cfi_offset directives in the non-FP runs; the FP |
| ; runs additionally describe ra/s0 and move the CFA to s0 and back. |
| define void @spill_x10() { |
| ; RV32IXQCCMP-LABEL: spill_x10: |
| ; RV32IXQCCMP: # %bb.0: # %entry |
| ; RV32IXQCCMP-NEXT: qc.cm.push {ra, s0-s11}, -64 |
| ; RV32IXQCCMP-NEXT: .cfi_def_cfa_offset 64 |
| ; RV32IXQCCMP-NEXT: .cfi_offset s10, -48 |
| ; RV32IXQCCMP-NEXT: .cfi_offset s11, -52 |
| ; RV32IXQCCMP-NEXT: #APP |
| ; RV32IXQCCMP-NEXT: li s10, 0 |
| ; RV32IXQCCMP-NEXT: #NO_APP |
| ; RV32IXQCCMP-NEXT: qc.cm.popret {ra, s0-s11}, 64 |
| ; |
| ; RV64IXQCCMP-LABEL: spill_x10: |
| ; RV64IXQCCMP: # %bb.0: # %entry |
| ; RV64IXQCCMP-NEXT: qc.cm.push {ra, s0-s11}, -112 |
| ; RV64IXQCCMP-NEXT: .cfi_def_cfa_offset 112 |
| ; RV64IXQCCMP-NEXT: .cfi_offset s10, -96 |
| ; RV64IXQCCMP-NEXT: .cfi_offset s11, -104 |
| ; RV64IXQCCMP-NEXT: #APP |
| ; RV64IXQCCMP-NEXT: li s10, 0 |
| ; RV64IXQCCMP-NEXT: #NO_APP |
| ; RV64IXQCCMP-NEXT: qc.cm.popret {ra, s0-s11}, 112 |
| ; |
| ; RV32IXQCCMP-FP-LABEL: spill_x10: |
| ; RV32IXQCCMP-FP: # %bb.0: # %entry |
| ; RV32IXQCCMP-FP-NEXT: qc.cm.pushfp {ra, s0-s11}, -64 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 64 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset ra, -4 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset s0, -8 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset s10, -48 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_offset s11, -52 |
| ; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa s0, 0 |
| ; RV32IXQCCMP-FP-NEXT: #APP |
| ; RV32IXQCCMP-FP-NEXT: li s10, 0 |
| ; RV32IXQCCMP-FP-NEXT: #NO_APP |
| ; RV32IXQCCMP-FP-NEXT: .cfi_def_cfa sp, 64 |
| ; RV32IXQCCMP-FP-NEXT: qc.cm.popret {ra, s0-s11}, 64 |
| ; |
| ; RV64IXQCCMP-FP-LABEL: spill_x10: |
| ; RV64IXQCCMP-FP: # %bb.0: # %entry |
| ; RV64IXQCCMP-FP-NEXT: qc.cm.pushfp {ra, s0-s11}, -112 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa_offset 112 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset ra, -8 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset s0, -16 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset s10, -96 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_offset s11, -104 |
| ; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa s0, 0 |
| ; RV64IXQCCMP-FP-NEXT: #APP |
| ; RV64IXQCCMP-FP-NEXT: li s10, 0 |
| ; RV64IXQCCMP-FP-NEXT: #NO_APP |
| ; RV64IXQCCMP-FP-NEXT: .cfi_def_cfa sp, 112 |
| ; RV64IXQCCMP-FP-NEXT: qc.cm.popret {ra, s0-s11}, 112 |
| ; |
| ; RV32IXQCCMP-SR-LABEL: spill_x10: |
| ; RV32IXQCCMP-SR: # %bb.0: # %entry |
| ; RV32IXQCCMP-SR-NEXT: qc.cm.push {ra, s0-s11}, -64 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 64 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset s10, -48 |
| ; RV32IXQCCMP-SR-NEXT: .cfi_offset s11, -52 |
| ; RV32IXQCCMP-SR-NEXT: #APP |
| ; RV32IXQCCMP-SR-NEXT: li s10, 0 |
| ; RV32IXQCCMP-SR-NEXT: #NO_APP |
| ; RV32IXQCCMP-SR-NEXT: qc.cm.popret {ra, s0-s11}, 64 |
| ; |
| ; RV64IXQCCMP-SR-LABEL: spill_x10: |
| ; RV64IXQCCMP-SR: # %bb.0: # %entry |
| ; RV64IXQCCMP-SR-NEXT: qc.cm.push {ra, s0-s11}, -112 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_def_cfa_offset 112 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset s10, -96 |
| ; RV64IXQCCMP-SR-NEXT: .cfi_offset s11, -104 |
| ; RV64IXQCCMP-SR-NEXT: #APP |
| ; RV64IXQCCMP-SR-NEXT: li s10, 0 |
| ; RV64IXQCCMP-SR-NEXT: #NO_APP |
| ; RV64IXQCCMP-SR-NEXT: qc.cm.popret {ra, s0-s11}, 112 |
| entry: |
| ; The "~{s10}" clobber is the only thing forcing a callee-saved spill. |
| tail call void asm sideeffect "li s10, 0", "~{s10}"() |
| ret void |
| } |