| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \ |
| ; RUN: | FileCheck -check-prefix=RV32I %s |
| ; RUN: llc -mtriple=riscv32 -mattr=+m -verify-machineinstrs < %s \ |
| ; RUN: | FileCheck -check-prefix=RV32IM %s |
| ; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \ |
| ; RUN: | FileCheck -check-prefix=RV64I %s |
| ; RUN: llc -mtriple=riscv64 -mattr=+m -verify-machineinstrs < %s \ |
| ; RUN: | FileCheck -check-prefix=RV64IM %s |
| |
| ; srem of <4 x i16> by four different constants (95, -124, 98, -1003). |
| ; Without +m this lowers to one __modsi3/__moddi3 libcall per lane; with |
| ; +m each lane uses a mulh-based magic-constant expansion. |
| define <4 x i16> @fold_srem_vec_1(<4 x i16> %x) nounwind { |
| ; RV32I-LABEL: fold_srem_vec_1: |
| ; RV32I: # %bb.0: |
| ; RV32I-NEXT: addi sp, sp, -32 |
| ; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: lh s0, 12(a1) |
| ; RV32I-NEXT: lh s1, 8(a1) |
| ; RV32I-NEXT: lh s2, 4(a1) |
| ; RV32I-NEXT: lh a2, 0(a1) |
| ; RV32I-NEXT: mv s3, a0 |
| ; RV32I-NEXT: li a1, 95 |
| ; RV32I-NEXT: mv a0, a2 |
| ; RV32I-NEXT: call __modsi3@plt |
| ; RV32I-NEXT: mv s4, a0 |
| ; RV32I-NEXT: li a1, -124 |
| ; RV32I-NEXT: mv a0, s2 |
| ; RV32I-NEXT: call __modsi3@plt |
| ; RV32I-NEXT: mv s2, a0 |
| ; RV32I-NEXT: li a1, 98 |
| ; RV32I-NEXT: mv a0, s1 |
| ; RV32I-NEXT: call __modsi3@plt |
| ; RV32I-NEXT: mv s1, a0 |
| ; RV32I-NEXT: li a1, -1003 |
| ; RV32I-NEXT: mv a0, s0 |
| ; RV32I-NEXT: call __modsi3@plt |
| ; RV32I-NEXT: sh a0, 6(s3) |
| ; RV32I-NEXT: sh s1, 4(s3) |
| ; RV32I-NEXT: sh s2, 2(s3) |
| ; RV32I-NEXT: sh s4, 0(s3) |
| ; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: addi sp, sp, 32 |
| ; RV32I-NEXT: ret |
| ; |
| ; RV32IM-LABEL: fold_srem_vec_1: |
| ; RV32IM: # %bb.0: |
| ; RV32IM-NEXT: lh a2, 12(a1) |
| ; RV32IM-NEXT: lh a3, 8(a1) |
| ; RV32IM-NEXT: lh a4, 0(a1) |
| ; RV32IM-NEXT: lh a1, 4(a1) |
| ; RV32IM-NEXT: lui a5, 706409 |
| ; RV32IM-NEXT: addi a5, a5, 389 |
| ; RV32IM-NEXT: mulh a5, a4, a5 |
| ; RV32IM-NEXT: add a5, a5, a4 |
| ; RV32IM-NEXT: srli a6, a5, 31 |
| ; RV32IM-NEXT: srli a5, a5, 6 |
| ; RV32IM-NEXT: add a5, a5, a6 |
| ; RV32IM-NEXT: li a6, 95 |
| ; RV32IM-NEXT: mul a5, a5, a6 |
| ; RV32IM-NEXT: sub a4, a4, a5 |
| ; RV32IM-NEXT: lui a5, 507375 |
| ; RV32IM-NEXT: addi a5, a5, 1981 |
| ; RV32IM-NEXT: mulh a5, a1, a5 |
| ; RV32IM-NEXT: sub a5, a5, a1 |
| ; RV32IM-NEXT: srli a6, a5, 31 |
| ; RV32IM-NEXT: srli a5, a5, 6 |
| ; RV32IM-NEXT: add a5, a5, a6 |
| ; RV32IM-NEXT: li a6, -124 |
| ; RV32IM-NEXT: mul a5, a5, a6 |
| ; RV32IM-NEXT: sub a1, a1, a5 |
| ; RV32IM-NEXT: lui a5, 342392 |
| ; RV32IM-NEXT: addi a5, a5, 669 |
| ; RV32IM-NEXT: mulh a5, a3, a5 |
| ; RV32IM-NEXT: srli a6, a5, 31 |
| ; RV32IM-NEXT: srli a5, a5, 5 |
| ; RV32IM-NEXT: add a5, a5, a6 |
| ; RV32IM-NEXT: li a6, 98 |
| ; RV32IM-NEXT: mul a5, a5, a6 |
| ; RV32IM-NEXT: sub a3, a3, a5 |
| ; RV32IM-NEXT: lui a5, 780943 |
| ; RV32IM-NEXT: addi a5, a5, 1809 |
| ; RV32IM-NEXT: mulh a5, a2, a5 |
| ; RV32IM-NEXT: srli a6, a5, 31 |
| ; RV32IM-NEXT: srli a5, a5, 8 |
| ; RV32IM-NEXT: add a5, a5, a6 |
| ; RV32IM-NEXT: li a6, -1003 |
| ; RV32IM-NEXT: mul a5, a5, a6 |
| ; RV32IM-NEXT: sub a2, a2, a5 |
| ; RV32IM-NEXT: sh a2, 6(a0) |
| ; RV32IM-NEXT: sh a3, 4(a0) |
| ; RV32IM-NEXT: sh a1, 2(a0) |
| ; RV32IM-NEXT: sh a4, 0(a0) |
| ; RV32IM-NEXT: ret |
| ; |
| ; RV64I-LABEL: fold_srem_vec_1: |
| ; RV64I: # %bb.0: |
| ; RV64I-NEXT: addi sp, sp, -48 |
| ; RV64I-NEXT: sd ra, 40(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: sd s0, 32(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: sd s1, 24(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: sd s2, 16(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: sd s3, 8(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: sd s4, 0(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: lh s0, 24(a1) |
| ; RV64I-NEXT: lh s1, 16(a1) |
| ; RV64I-NEXT: lh s2, 8(a1) |
| ; RV64I-NEXT: lh a2, 0(a1) |
| ; RV64I-NEXT: mv s3, a0 |
| ; RV64I-NEXT: li a1, 95 |
| ; RV64I-NEXT: mv a0, a2 |
| ; RV64I-NEXT: call __moddi3@plt |
| ; RV64I-NEXT: mv s4, a0 |
| ; RV64I-NEXT: li a1, -124 |
| ; RV64I-NEXT: mv a0, s2 |
| ; RV64I-NEXT: call __moddi3@plt |
| ; RV64I-NEXT: mv s2, a0 |
| ; RV64I-NEXT: li a1, 98 |
| ; RV64I-NEXT: mv a0, s1 |
| ; RV64I-NEXT: call __moddi3@plt |
| ; RV64I-NEXT: mv s1, a0 |
| ; RV64I-NEXT: li a1, -1003 |
| ; RV64I-NEXT: mv a0, s0 |
| ; RV64I-NEXT: call __moddi3@plt |
| ; RV64I-NEXT: sh a0, 6(s3) |
| ; RV64I-NEXT: sh s1, 4(s3) |
| ; RV64I-NEXT: sh s2, 2(s3) |
| ; RV64I-NEXT: sh s4, 0(s3) |
| ; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: ld s2, 16(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: ld s3, 8(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: ld s4, 0(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: addi sp, sp, 48 |
| ; RV64I-NEXT: ret |
| ; |
| ; RV64IM-LABEL: fold_srem_vec_1: |
| ; RV64IM: # %bb.0: |
| ; RV64IM-NEXT: lh a2, 0(a1) |
| ; RV64IM-NEXT: lui a3, %hi(.LCPI0_0) |
| ; RV64IM-NEXT: ld a3, %lo(.LCPI0_0)(a3) |
| ; RV64IM-NEXT: lh a4, 24(a1) |
| ; RV64IM-NEXT: lh a5, 16(a1) |
| ; RV64IM-NEXT: lh a1, 8(a1) |
| ; RV64IM-NEXT: mulh a3, a2, a3 |
| ; RV64IM-NEXT: add a3, a3, a2 |
| ; RV64IM-NEXT: srli a6, a3, 63 |
| ; RV64IM-NEXT: srli a3, a3, 6 |
| ; RV64IM-NEXT: add a3, a3, a6 |
| ; RV64IM-NEXT: lui a6, %hi(.LCPI0_1) |
| ; RV64IM-NEXT: ld a6, %lo(.LCPI0_1)(a6) |
| ; RV64IM-NEXT: li a7, 95 |
| ; RV64IM-NEXT: mul a3, a3, a7 |
| ; RV64IM-NEXT: subw a2, a2, a3 |
| ; RV64IM-NEXT: mulh a3, a1, a6 |
| ; RV64IM-NEXT: sub a3, a3, a1 |
| ; RV64IM-NEXT: srli a6, a3, 63 |
| ; RV64IM-NEXT: srli a3, a3, 6 |
| ; RV64IM-NEXT: add a3, a3, a6 |
| ; RV64IM-NEXT: lui a6, %hi(.LCPI0_2) |
| ; RV64IM-NEXT: ld a6, %lo(.LCPI0_2)(a6) |
| ; RV64IM-NEXT: li a7, -124 |
| ; RV64IM-NEXT: mul a3, a3, a7 |
| ; RV64IM-NEXT: subw a1, a1, a3 |
| ; RV64IM-NEXT: mulh a3, a5, a6 |
| ; RV64IM-NEXT: srli a6, a3, 63 |
| ; RV64IM-NEXT: srli a3, a3, 5 |
| ; RV64IM-NEXT: add a3, a3, a6 |
| ; RV64IM-NEXT: lui a6, %hi(.LCPI0_3) |
| ; RV64IM-NEXT: ld a6, %lo(.LCPI0_3)(a6) |
| ; RV64IM-NEXT: li a7, 98 |
| ; RV64IM-NEXT: mul a3, a3, a7 |
| ; RV64IM-NEXT: subw a5, a5, a3 |
| ; RV64IM-NEXT: mulh a3, a4, a6 |
| ; RV64IM-NEXT: srli a6, a3, 63 |
| ; RV64IM-NEXT: srli a3, a3, 7 |
| ; RV64IM-NEXT: add a3, a3, a6 |
| ; RV64IM-NEXT: li a6, -1003 |
| ; RV64IM-NEXT: mul a3, a3, a6 |
| ; RV64IM-NEXT: subw a4, a4, a3 |
| ; RV64IM-NEXT: sh a4, 6(a0) |
| ; RV64IM-NEXT: sh a5, 4(a0) |
| ; RV64IM-NEXT: sh a1, 2(a0) |
| ; RV64IM-NEXT: sh a2, 0(a0) |
| ; RV64IM-NEXT: ret |
| %1 = srem <4 x i16> %x, <i16 95, i16 -124, i16 98, i16 -1003> |
| ret <4 x i16> %1 |
| } |
| |
| ; srem of <4 x i16> by a splat constant (95 in every lane). With +m the |
| ; same magic multiplicand is reused for all four lanes (a single lui/addi |
| ; or constant-pool load), so only the mulh sequence repeats per lane. |
| define <4 x i16> @fold_srem_vec_2(<4 x i16> %x) nounwind { |
| ; RV32I-LABEL: fold_srem_vec_2: |
| ; RV32I: # %bb.0: |
| ; RV32I-NEXT: addi sp, sp, -32 |
| ; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: lh s0, 12(a1) |
| ; RV32I-NEXT: lh s1, 8(a1) |
| ; RV32I-NEXT: lh s2, 4(a1) |
| ; RV32I-NEXT: lh a2, 0(a1) |
| ; RV32I-NEXT: mv s3, a0 |
| ; RV32I-NEXT: li a1, 95 |
| ; RV32I-NEXT: mv a0, a2 |
| ; RV32I-NEXT: call __modsi3@plt |
| ; RV32I-NEXT: mv s4, a0 |
| ; RV32I-NEXT: li a1, 95 |
| ; RV32I-NEXT: mv a0, s2 |
| ; RV32I-NEXT: call __modsi3@plt |
| ; RV32I-NEXT: mv s2, a0 |
| ; RV32I-NEXT: li a1, 95 |
| ; RV32I-NEXT: mv a0, s1 |
| ; RV32I-NEXT: call __modsi3@plt |
| ; RV32I-NEXT: mv s1, a0 |
| ; RV32I-NEXT: li a1, 95 |
| ; RV32I-NEXT: mv a0, s0 |
| ; RV32I-NEXT: call __modsi3@plt |
| ; RV32I-NEXT: sh a0, 6(s3) |
| ; RV32I-NEXT: sh s1, 4(s3) |
| ; RV32I-NEXT: sh s2, 2(s3) |
| ; RV32I-NEXT: sh s4, 0(s3) |
| ; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: addi sp, sp, 32 |
| ; RV32I-NEXT: ret |
| ; |
| ; RV32IM-LABEL: fold_srem_vec_2: |
| ; RV32IM: # %bb.0: |
| ; RV32IM-NEXT: lh a2, 12(a1) |
| ; RV32IM-NEXT: lh a3, 8(a1) |
| ; RV32IM-NEXT: lh a4, 0(a1) |
| ; RV32IM-NEXT: lh a1, 4(a1) |
| ; RV32IM-NEXT: lui a5, 706409 |
| ; RV32IM-NEXT: addi a5, a5, 389 |
| ; RV32IM-NEXT: mulh a6, a4, a5 |
| ; RV32IM-NEXT: add a6, a6, a4 |
| ; RV32IM-NEXT: srli a7, a6, 31 |
| ; RV32IM-NEXT: srli a6, a6, 6 |
| ; RV32IM-NEXT: add a6, a6, a7 |
| ; RV32IM-NEXT: li a7, 95 |
| ; RV32IM-NEXT: mul a6, a6, a7 |
| ; RV32IM-NEXT: sub a4, a4, a6 |
| ; RV32IM-NEXT: mulh a6, a1, a5 |
| ; RV32IM-NEXT: add a6, a6, a1 |
| ; RV32IM-NEXT: srli t0, a6, 31 |
| ; RV32IM-NEXT: srli a6, a6, 6 |
| ; RV32IM-NEXT: add a6, a6, t0 |
| ; RV32IM-NEXT: mul a6, a6, a7 |
| ; RV32IM-NEXT: sub a1, a1, a6 |
| ; RV32IM-NEXT: mulh a6, a3, a5 |
| ; RV32IM-NEXT: add a6, a6, a3 |
| ; RV32IM-NEXT: srli t0, a6, 31 |
| ; RV32IM-NEXT: srli a6, a6, 6 |
| ; RV32IM-NEXT: add a6, a6, t0 |
| ; RV32IM-NEXT: mul a6, a6, a7 |
| ; RV32IM-NEXT: sub a3, a3, a6 |
| ; RV32IM-NEXT: mulh a5, a2, a5 |
| ; RV32IM-NEXT: add a5, a5, a2 |
| ; RV32IM-NEXT: srli a6, a5, 31 |
| ; RV32IM-NEXT: srli a5, a5, 6 |
| ; RV32IM-NEXT: add a5, a5, a6 |
| ; RV32IM-NEXT: mul a5, a5, a7 |
| ; RV32IM-NEXT: sub a2, a2, a5 |
| ; RV32IM-NEXT: sh a2, 6(a0) |
| ; RV32IM-NEXT: sh a3, 4(a0) |
| ; RV32IM-NEXT: sh a1, 2(a0) |
| ; RV32IM-NEXT: sh a4, 0(a0) |
| ; RV32IM-NEXT: ret |
| ; |
| ; RV64I-LABEL: fold_srem_vec_2: |
| ; RV64I: # %bb.0: |
| ; RV64I-NEXT: addi sp, sp, -48 |
| ; RV64I-NEXT: sd ra, 40(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: sd s0, 32(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: sd s1, 24(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: sd s2, 16(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: sd s3, 8(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: sd s4, 0(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: lh s0, 24(a1) |
| ; RV64I-NEXT: lh s1, 16(a1) |
| ; RV64I-NEXT: lh s2, 8(a1) |
| ; RV64I-NEXT: lh a2, 0(a1) |
| ; RV64I-NEXT: mv s3, a0 |
| ; RV64I-NEXT: li a1, 95 |
| ; RV64I-NEXT: mv a0, a2 |
| ; RV64I-NEXT: call __moddi3@plt |
| ; RV64I-NEXT: mv s4, a0 |
| ; RV64I-NEXT: li a1, 95 |
| ; RV64I-NEXT: mv a0, s2 |
| ; RV64I-NEXT: call __moddi3@plt |
| ; RV64I-NEXT: mv s2, a0 |
| ; RV64I-NEXT: li a1, 95 |
| ; RV64I-NEXT: mv a0, s1 |
| ; RV64I-NEXT: call __moddi3@plt |
| ; RV64I-NEXT: mv s1, a0 |
| ; RV64I-NEXT: li a1, 95 |
| ; RV64I-NEXT: mv a0, s0 |
| ; RV64I-NEXT: call __moddi3@plt |
| ; RV64I-NEXT: sh a0, 6(s3) |
| ; RV64I-NEXT: sh s1, 4(s3) |
| ; RV64I-NEXT: sh s2, 2(s3) |
| ; RV64I-NEXT: sh s4, 0(s3) |
| ; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: ld s2, 16(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: ld s3, 8(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: ld s4, 0(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: addi sp, sp, 48 |
| ; RV64I-NEXT: ret |
| ; |
| ; RV64IM-LABEL: fold_srem_vec_2: |
| ; RV64IM: # %bb.0: |
| ; RV64IM-NEXT: lh a2, 0(a1) |
| ; RV64IM-NEXT: lui a3, %hi(.LCPI1_0) |
| ; RV64IM-NEXT: ld a3, %lo(.LCPI1_0)(a3) |
| ; RV64IM-NEXT: lh a4, 24(a1) |
| ; RV64IM-NEXT: lh a5, 16(a1) |
| ; RV64IM-NEXT: lh a1, 8(a1) |
| ; RV64IM-NEXT: mulh a6, a2, a3 |
| ; RV64IM-NEXT: add a6, a6, a2 |
| ; RV64IM-NEXT: srli a7, a6, 63 |
| ; RV64IM-NEXT: srli a6, a6, 6 |
| ; RV64IM-NEXT: add a6, a6, a7 |
| ; RV64IM-NEXT: li a7, 95 |
| ; RV64IM-NEXT: mul a6, a6, a7 |
| ; RV64IM-NEXT: subw a2, a2, a6 |
| ; RV64IM-NEXT: mulh a6, a1, a3 |
| ; RV64IM-NEXT: add a6, a6, a1 |
| ; RV64IM-NEXT: srli t0, a6, 63 |
| ; RV64IM-NEXT: srli a6, a6, 6 |
| ; RV64IM-NEXT: add a6, a6, t0 |
| ; RV64IM-NEXT: mul a6, a6, a7 |
| ; RV64IM-NEXT: subw a1, a1, a6 |
| ; RV64IM-NEXT: mulh a6, a5, a3 |
| ; RV64IM-NEXT: add a6, a6, a5 |
| ; RV64IM-NEXT: srli t0, a6, 63 |
| ; RV64IM-NEXT: srli a6, a6, 6 |
| ; RV64IM-NEXT: add a6, a6, t0 |
| ; RV64IM-NEXT: mul a6, a6, a7 |
| ; RV64IM-NEXT: subw a5, a5, a6 |
| ; RV64IM-NEXT: mulh a3, a4, a3 |
| ; RV64IM-NEXT: add a3, a3, a4 |
| ; RV64IM-NEXT: srli a6, a3, 63 |
| ; RV64IM-NEXT: srli a3, a3, 6 |
| ; RV64IM-NEXT: add a3, a3, a6 |
| ; RV64IM-NEXT: mul a3, a3, a7 |
| ; RV64IM-NEXT: subw a4, a4, a3 |
| ; RV64IM-NEXT: sh a4, 6(a0) |
| ; RV64IM-NEXT: sh a5, 4(a0) |
| ; RV64IM-NEXT: sh a1, 2(a0) |
| ; RV64IM-NEXT: sh a2, 0(a0) |
| ; RV64IM-NEXT: ret |
| %1 = srem <4 x i16> %x, <i16 95, i16 95, i16 95, i16 95> |
| ret <4 x i16> %1 |
| } |
| |
| |
| ; Don't fold if we can combine srem with sdiv. |
| ; srem and sdiv by the same splat constant (95), results added together. |
| ; With +m the quotient from the magic-multiply sequence (note srai rather |
| ; than srli) is reused for both the remainder and the sdiv result, so each |
| ; lane needs only one mulh. Without +m both __mod* and __div* are called. |
| define <4 x i16> @combine_srem_sdiv(<4 x i16> %x) nounwind { |
| ; RV32I-LABEL: combine_srem_sdiv: |
| ; RV32I: # %bb.0: |
| ; RV32I-NEXT: addi sp, sp, -48 |
| ; RV32I-NEXT: sw ra, 44(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: sw s0, 40(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: sw s1, 36(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: sw s2, 32(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: sw s3, 28(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: sw s4, 24(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: sw s5, 20(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: sw s6, 16(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: sw s7, 12(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: sw s8, 8(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: lh s1, 0(a1) |
| ; RV32I-NEXT: lh s2, 4(a1) |
| ; RV32I-NEXT: lh s3, 8(a1) |
| ; RV32I-NEXT: lh s4, 12(a1) |
| ; RV32I-NEXT: mv s0, a0 |
| ; RV32I-NEXT: li a1, 95 |
| ; RV32I-NEXT: mv a0, s4 |
| ; RV32I-NEXT: call __modsi3@plt |
| ; RV32I-NEXT: mv s5, a0 |
| ; RV32I-NEXT: li a1, 95 |
| ; RV32I-NEXT: mv a0, s3 |
| ; RV32I-NEXT: call __modsi3@plt |
| ; RV32I-NEXT: mv s6, a0 |
| ; RV32I-NEXT: li a1, 95 |
| ; RV32I-NEXT: mv a0, s2 |
| ; RV32I-NEXT: call __modsi3@plt |
| ; RV32I-NEXT: mv s7, a0 |
| ; RV32I-NEXT: li a1, 95 |
| ; RV32I-NEXT: mv a0, s1 |
| ; RV32I-NEXT: call __modsi3@plt |
| ; RV32I-NEXT: mv s8, a0 |
| ; RV32I-NEXT: li a1, 95 |
| ; RV32I-NEXT: mv a0, s4 |
| ; RV32I-NEXT: call __divsi3@plt |
| ; RV32I-NEXT: mv s4, a0 |
| ; RV32I-NEXT: li a1, 95 |
| ; RV32I-NEXT: mv a0, s3 |
| ; RV32I-NEXT: call __divsi3@plt |
| ; RV32I-NEXT: mv s3, a0 |
| ; RV32I-NEXT: li a1, 95 |
| ; RV32I-NEXT: mv a0, s2 |
| ; RV32I-NEXT: call __divsi3@plt |
| ; RV32I-NEXT: mv s2, a0 |
| ; RV32I-NEXT: li a1, 95 |
| ; RV32I-NEXT: mv a0, s1 |
| ; RV32I-NEXT: call __divsi3@plt |
| ; RV32I-NEXT: add a0, s8, a0 |
| ; RV32I-NEXT: add s2, s7, s2 |
| ; RV32I-NEXT: add s3, s6, s3 |
| ; RV32I-NEXT: add s4, s5, s4 |
| ; RV32I-NEXT: sh s4, 6(s0) |
| ; RV32I-NEXT: sh s3, 4(s0) |
| ; RV32I-NEXT: sh s2, 2(s0) |
| ; RV32I-NEXT: sh a0, 0(s0) |
| ; RV32I-NEXT: lw ra, 44(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: lw s0, 40(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: lw s1, 36(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: lw s2, 32(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: lw s3, 28(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: lw s4, 24(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: lw s5, 20(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: lw s6, 16(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: lw s7, 12(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: lw s8, 8(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: addi sp, sp, 48 |
| ; RV32I-NEXT: ret |
| ; |
| ; RV32IM-LABEL: combine_srem_sdiv: |
| ; RV32IM: # %bb.0: |
| ; RV32IM-NEXT: lh a2, 0(a1) |
| ; RV32IM-NEXT: lh a3, 4(a1) |
| ; RV32IM-NEXT: lh a4, 12(a1) |
| ; RV32IM-NEXT: lh a1, 8(a1) |
| ; RV32IM-NEXT: lui a5, 706409 |
| ; RV32IM-NEXT: addi a5, a5, 389 |
| ; RV32IM-NEXT: mulh a6, a4, a5 |
| ; RV32IM-NEXT: add a6, a6, a4 |
| ; RV32IM-NEXT: srli a7, a6, 31 |
| ; RV32IM-NEXT: srai a6, a6, 6 |
| ; RV32IM-NEXT: add a6, a6, a7 |
| ; RV32IM-NEXT: li a7, 95 |
| ; RV32IM-NEXT: mul t0, a6, a7 |
| ; RV32IM-NEXT: mulh t1, a1, a5 |
| ; RV32IM-NEXT: add t1, t1, a1 |
| ; RV32IM-NEXT: srli t2, t1, 31 |
| ; RV32IM-NEXT: srai t1, t1, 6 |
| ; RV32IM-NEXT: add t1, t1, t2 |
| ; RV32IM-NEXT: mul t2, t1, a7 |
| ; RV32IM-NEXT: mulh t3, a3, a5 |
| ; RV32IM-NEXT: add t3, t3, a3 |
| ; RV32IM-NEXT: srli t4, t3, 31 |
| ; RV32IM-NEXT: srai t3, t3, 6 |
| ; RV32IM-NEXT: add t3, t3, t4 |
| ; RV32IM-NEXT: mul t4, t3, a7 |
| ; RV32IM-NEXT: mulh a5, a2, a5 |
| ; RV32IM-NEXT: add a5, a5, a2 |
| ; RV32IM-NEXT: srli t5, a5, 31 |
| ; RV32IM-NEXT: srai a5, a5, 6 |
| ; RV32IM-NEXT: add a5, a5, t5 |
| ; RV32IM-NEXT: mul a7, a5, a7 |
| ; RV32IM-NEXT: add a2, a2, a5 |
| ; RV32IM-NEXT: sub a2, a2, a7 |
| ; RV32IM-NEXT: add a3, a3, t3 |
| ; RV32IM-NEXT: sub a3, a3, t4 |
| ; RV32IM-NEXT: add a1, a1, t1 |
| ; RV32IM-NEXT: sub a1, a1, t2 |
| ; RV32IM-NEXT: add a4, a4, a6 |
| ; RV32IM-NEXT: sub a4, a4, t0 |
| ; RV32IM-NEXT: sh a4, 6(a0) |
| ; RV32IM-NEXT: sh a1, 4(a0) |
| ; RV32IM-NEXT: sh a3, 2(a0) |
| ; RV32IM-NEXT: sh a2, 0(a0) |
| ; RV32IM-NEXT: ret |
| ; |
| ; RV64I-LABEL: combine_srem_sdiv: |
| ; RV64I: # %bb.0: |
| ; RV64I-NEXT: addi sp, sp, -80 |
| ; RV64I-NEXT: sd ra, 72(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: sd s0, 64(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: sd s1, 56(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: sd s2, 48(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: sd s3, 40(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: sd s4, 32(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: sd s5, 24(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: sd s6, 16(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: sd s7, 8(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: sd s8, 0(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: lh s1, 0(a1) |
| ; RV64I-NEXT: lh s2, 8(a1) |
| ; RV64I-NEXT: lh s3, 16(a1) |
| ; RV64I-NEXT: lh s4, 24(a1) |
| ; RV64I-NEXT: mv s0, a0 |
| ; RV64I-NEXT: li a1, 95 |
| ; RV64I-NEXT: mv a0, s4 |
| ; RV64I-NEXT: call __moddi3@plt |
| ; RV64I-NEXT: mv s5, a0 |
| ; RV64I-NEXT: li a1, 95 |
| ; RV64I-NEXT: mv a0, s3 |
| ; RV64I-NEXT: call __moddi3@plt |
| ; RV64I-NEXT: mv s6, a0 |
| ; RV64I-NEXT: li a1, 95 |
| ; RV64I-NEXT: mv a0, s2 |
| ; RV64I-NEXT: call __moddi3@plt |
| ; RV64I-NEXT: mv s7, a0 |
| ; RV64I-NEXT: li a1, 95 |
| ; RV64I-NEXT: mv a0, s1 |
| ; RV64I-NEXT: call __moddi3@plt |
| ; RV64I-NEXT: mv s8, a0 |
| ; RV64I-NEXT: li a1, 95 |
| ; RV64I-NEXT: mv a0, s4 |
| ; RV64I-NEXT: call __divdi3@plt |
| ; RV64I-NEXT: mv s4, a0 |
| ; RV64I-NEXT: li a1, 95 |
| ; RV64I-NEXT: mv a0, s3 |
| ; RV64I-NEXT: call __divdi3@plt |
| ; RV64I-NEXT: mv s3, a0 |
| ; RV64I-NEXT: li a1, 95 |
| ; RV64I-NEXT: mv a0, s2 |
| ; RV64I-NEXT: call __divdi3@plt |
| ; RV64I-NEXT: mv s2, a0 |
| ; RV64I-NEXT: li a1, 95 |
| ; RV64I-NEXT: mv a0, s1 |
| ; RV64I-NEXT: call __divdi3@plt |
| ; RV64I-NEXT: add a0, s8, a0 |
| ; RV64I-NEXT: add s2, s7, s2 |
| ; RV64I-NEXT: add s3, s6, s3 |
| ; RV64I-NEXT: add s4, s5, s4 |
| ; RV64I-NEXT: sh s4, 6(s0) |
| ; RV64I-NEXT: sh s3, 4(s0) |
| ; RV64I-NEXT: sh s2, 2(s0) |
| ; RV64I-NEXT: sh a0, 0(s0) |
| ; RV64I-NEXT: ld ra, 72(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: ld s0, 64(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: ld s1, 56(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: ld s2, 48(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: ld s3, 40(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: ld s4, 32(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: ld s5, 24(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: ld s6, 16(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: ld s7, 8(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: ld s8, 0(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: addi sp, sp, 80 |
| ; RV64I-NEXT: ret |
| ; |
| ; RV64IM-LABEL: combine_srem_sdiv: |
| ; RV64IM: # %bb.0: |
| ; RV64IM-NEXT: lh a2, 24(a1) |
| ; RV64IM-NEXT: lui a3, %hi(.LCPI2_0) |
| ; RV64IM-NEXT: ld a3, %lo(.LCPI2_0)(a3) |
| ; RV64IM-NEXT: lh a4, 0(a1) |
| ; RV64IM-NEXT: lh a5, 8(a1) |
| ; RV64IM-NEXT: lh a1, 16(a1) |
| ; RV64IM-NEXT: mulh a6, a2, a3 |
| ; RV64IM-NEXT: add a6, a6, a2 |
| ; RV64IM-NEXT: srli a7, a6, 63 |
| ; RV64IM-NEXT: srai a6, a6, 6 |
| ; RV64IM-NEXT: add a6, a6, a7 |
| ; RV64IM-NEXT: li a7, 95 |
| ; RV64IM-NEXT: mul t0, a6, a7 |
| ; RV64IM-NEXT: mulh t1, a1, a3 |
| ; RV64IM-NEXT: add t1, t1, a1 |
| ; RV64IM-NEXT: srli t2, t1, 63 |
| ; RV64IM-NEXT: srai t1, t1, 6 |
| ; RV64IM-NEXT: add t1, t1, t2 |
| ; RV64IM-NEXT: mul t2, t1, a7 |
| ; RV64IM-NEXT: mulh t3, a5, a3 |
| ; RV64IM-NEXT: add t3, t3, a5 |
| ; RV64IM-NEXT: srli t4, t3, 63 |
| ; RV64IM-NEXT: srai t3, t3, 6 |
| ; RV64IM-NEXT: add t3, t3, t4 |
| ; RV64IM-NEXT: mul t4, t3, a7 |
| ; RV64IM-NEXT: mulh a3, a4, a3 |
| ; RV64IM-NEXT: add a3, a3, a4 |
| ; RV64IM-NEXT: srli t5, a3, 63 |
| ; RV64IM-NEXT: srai a3, a3, 6 |
| ; RV64IM-NEXT: add a3, a3, t5 |
| ; RV64IM-NEXT: mul a7, a3, a7 |
| ; RV64IM-NEXT: add a3, a4, a3 |
| ; RV64IM-NEXT: subw a3, a3, a7 |
| ; RV64IM-NEXT: add a5, a5, t3 |
| ; RV64IM-NEXT: subw a4, a5, t4 |
| ; RV64IM-NEXT: add a1, a1, t1 |
| ; RV64IM-NEXT: subw a1, a1, t2 |
| ; RV64IM-NEXT: add a2, a2, a6 |
| ; RV64IM-NEXT: subw a2, a2, t0 |
| ; RV64IM-NEXT: sh a2, 6(a0) |
| ; RV64IM-NEXT: sh a1, 4(a0) |
| ; RV64IM-NEXT: sh a4, 2(a0) |
| ; RV64IM-NEXT: sh a3, 0(a0) |
| ; RV64IM-NEXT: ret |
| %1 = srem <4 x i16> %x, <i16 95, i16 95, i16 95, i16 95> |
| %2 = sdiv <4 x i16> %x, <i16 95, i16 95, i16 95, i16 95> |
| %3 = add <4 x i16> %1, %2 |
| ret <4 x i16> %3 |
| } |
| |
| ; Don't fold for divisors that are a power of two. |
| ; Power-of-two divisors (64, 32, 8) lower to the shift/add/andi/sub signed |
| ; remainder pattern with no multiply or libcall; only the lane with the |
| ; non-power-of-two divisor (95) still needs __mod*/mulh. |
| define <4 x i16> @dont_fold_srem_power_of_two(<4 x i16> %x) nounwind { |
| ; RV32I-LABEL: dont_fold_srem_power_of_two: |
| ; RV32I: # %bb.0: |
| ; RV32I-NEXT: addi sp, sp, -32 |
| ; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: mv s0, a0 |
| ; RV32I-NEXT: lh a2, 0(a1) |
| ; RV32I-NEXT: lh a0, 12(a1) |
| ; RV32I-NEXT: lh a3, 8(a1) |
| ; RV32I-NEXT: lh a1, 4(a1) |
| ; RV32I-NEXT: srli a4, a2, 26 |
| ; RV32I-NEXT: add a4, a2, a4 |
| ; RV32I-NEXT: andi a4, a4, -64 |
| ; RV32I-NEXT: sub s1, a2, a4 |
| ; RV32I-NEXT: srli a2, a1, 27 |
| ; RV32I-NEXT: add a2, a1, a2 |
| ; RV32I-NEXT: andi a2, a2, -32 |
| ; RV32I-NEXT: sub s2, a1, a2 |
| ; RV32I-NEXT: srli a1, a3, 29 |
| ; RV32I-NEXT: add a1, a3, a1 |
| ; RV32I-NEXT: andi a1, a1, -8 |
| ; RV32I-NEXT: sub s3, a3, a1 |
| ; RV32I-NEXT: li a1, 95 |
| ; RV32I-NEXT: call __modsi3@plt |
| ; RV32I-NEXT: sh a0, 6(s0) |
| ; RV32I-NEXT: sh s3, 4(s0) |
| ; RV32I-NEXT: sh s2, 2(s0) |
| ; RV32I-NEXT: sh s1, 0(s0) |
| ; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: addi sp, sp, 32 |
| ; RV32I-NEXT: ret |
| ; |
| ; RV32IM-LABEL: dont_fold_srem_power_of_two: |
| ; RV32IM: # %bb.0: |
| ; RV32IM-NEXT: lh a2, 8(a1) |
| ; RV32IM-NEXT: lh a3, 4(a1) |
| ; RV32IM-NEXT: lh a4, 12(a1) |
| ; RV32IM-NEXT: lh a1, 0(a1) |
| ; RV32IM-NEXT: lui a5, 706409 |
| ; RV32IM-NEXT: addi a5, a5, 389 |
| ; RV32IM-NEXT: mulh a5, a4, a5 |
| ; RV32IM-NEXT: add a5, a5, a4 |
| ; RV32IM-NEXT: srli a6, a5, 31 |
| ; RV32IM-NEXT: srli a5, a5, 6 |
| ; RV32IM-NEXT: add a5, a5, a6 |
| ; RV32IM-NEXT: li a6, 95 |
| ; RV32IM-NEXT: mul a5, a5, a6 |
| ; RV32IM-NEXT: sub a4, a4, a5 |
| ; RV32IM-NEXT: srli a5, a1, 26 |
| ; RV32IM-NEXT: add a5, a1, a5 |
| ; RV32IM-NEXT: andi a5, a5, -64 |
| ; RV32IM-NEXT: sub a1, a1, a5 |
| ; RV32IM-NEXT: srli a5, a3, 27 |
| ; RV32IM-NEXT: add a5, a3, a5 |
| ; RV32IM-NEXT: andi a5, a5, -32 |
| ; RV32IM-NEXT: sub a3, a3, a5 |
| ; RV32IM-NEXT: srli a5, a2, 29 |
| ; RV32IM-NEXT: add a5, a2, a5 |
| ; RV32IM-NEXT: andi a5, a5, -8 |
| ; RV32IM-NEXT: sub a2, a2, a5 |
| ; RV32IM-NEXT: sh a2, 4(a0) |
| ; RV32IM-NEXT: sh a3, 2(a0) |
| ; RV32IM-NEXT: sh a1, 0(a0) |
| ; RV32IM-NEXT: sh a4, 6(a0) |
| ; RV32IM-NEXT: ret |
| ; |
| ; RV64I-LABEL: dont_fold_srem_power_of_two: |
| ; RV64I: # %bb.0: |
| ; RV64I-NEXT: addi sp, sp, -48 |
| ; RV64I-NEXT: sd ra, 40(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: sd s0, 32(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: sd s1, 24(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: sd s2, 16(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: sd s3, 8(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: mv s0, a0 |
| ; RV64I-NEXT: lh a2, 0(a1) |
| ; RV64I-NEXT: lh a0, 24(a1) |
| ; RV64I-NEXT: lh a3, 16(a1) |
| ; RV64I-NEXT: lh a1, 8(a1) |
| ; RV64I-NEXT: srli a4, a2, 58 |
| ; RV64I-NEXT: add a4, a2, a4 |
| ; RV64I-NEXT: andi a4, a4, -64 |
| ; RV64I-NEXT: subw s1, a2, a4 |
| ; RV64I-NEXT: srli a2, a1, 59 |
| ; RV64I-NEXT: add a2, a1, a2 |
| ; RV64I-NEXT: andi a2, a2, -32 |
| ; RV64I-NEXT: subw s2, a1, a2 |
| ; RV64I-NEXT: srli a1, a3, 61 |
| ; RV64I-NEXT: add a1, a3, a1 |
| ; RV64I-NEXT: andi a1, a1, -8 |
| ; RV64I-NEXT: subw s3, a3, a1 |
| ; RV64I-NEXT: li a1, 95 |
| ; RV64I-NEXT: call __moddi3@plt |
| ; RV64I-NEXT: sh a0, 6(s0) |
| ; RV64I-NEXT: sh s3, 4(s0) |
| ; RV64I-NEXT: sh s2, 2(s0) |
| ; RV64I-NEXT: sh s1, 0(s0) |
| ; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: ld s2, 16(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: ld s3, 8(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: addi sp, sp, 48 |
| ; RV64I-NEXT: ret |
| ; |
| ; RV64IM-LABEL: dont_fold_srem_power_of_two: |
| ; RV64IM: # %bb.0: |
| ; RV64IM-NEXT: lh a2, 24(a1) |
| ; RV64IM-NEXT: lui a3, %hi(.LCPI3_0) |
| ; RV64IM-NEXT: ld a3, %lo(.LCPI3_0)(a3) |
| ; RV64IM-NEXT: lh a4, 16(a1) |
| ; RV64IM-NEXT: lh a5, 8(a1) |
| ; RV64IM-NEXT: lh a1, 0(a1) |
| ; RV64IM-NEXT: mulh a3, a2, a3 |
| ; RV64IM-NEXT: add a3, a3, a2 |
| ; RV64IM-NEXT: srli a6, a3, 63 |
| ; RV64IM-NEXT: srli a3, a3, 6 |
| ; RV64IM-NEXT: add a3, a3, a6 |
| ; RV64IM-NEXT: li a6, 95 |
| ; RV64IM-NEXT: mul a3, a3, a6 |
| ; RV64IM-NEXT: subw a2, a2, a3 |
| ; RV64IM-NEXT: srli a3, a1, 58 |
| ; RV64IM-NEXT: add a3, a1, a3 |
| ; RV64IM-NEXT: andi a3, a3, -64 |
| ; RV64IM-NEXT: subw a1, a1, a3 |
| ; RV64IM-NEXT: srli a3, a5, 59 |
| ; RV64IM-NEXT: add a3, a5, a3 |
| ; RV64IM-NEXT: andi a3, a3, -32 |
| ; RV64IM-NEXT: subw a5, a5, a3 |
| ; RV64IM-NEXT: srli a3, a4, 61 |
| ; RV64IM-NEXT: add a3, a4, a3 |
| ; RV64IM-NEXT: andi a3, a3, -8 |
| ; RV64IM-NEXT: subw a4, a4, a3 |
| ; RV64IM-NEXT: sh a4, 4(a0) |
| ; RV64IM-NEXT: sh a5, 2(a0) |
| ; RV64IM-NEXT: sh a1, 0(a0) |
| ; RV64IM-NEXT: sh a2, 6(a0) |
| ; RV64IM-NEXT: ret |
| %1 = srem <4 x i16> %x, <i16 64, i16 32, i16 8, i16 95> |
| ret <4 x i16> %1 |
| } |
| |
| ; Don't fold if the divisor is one. |
| ; The lane dividing by 1 folds to a remainder of 0 (a plain `sh zero` store, |
| ; no libcall or multiply); the remaining lanes (654, 23, 5423) lower via |
| ; __mod* libcalls or mulh magic expansion as usual. |
| define <4 x i16> @dont_fold_srem_one(<4 x i16> %x) nounwind { |
| ; RV32I-LABEL: dont_fold_srem_one: |
| ; RV32I: # %bb.0: |
| ; RV32I-NEXT: addi sp, sp, -32 |
| ; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: lh s0, 12(a1) |
| ; RV32I-NEXT: lh s1, 8(a1) |
| ; RV32I-NEXT: lh a2, 4(a1) |
| ; RV32I-NEXT: mv s2, a0 |
| ; RV32I-NEXT: li a1, 654 |
| ; RV32I-NEXT: mv a0, a2 |
| ; RV32I-NEXT: call __modsi3@plt |
| ; RV32I-NEXT: mv s3, a0 |
| ; RV32I-NEXT: li a1, 23 |
| ; RV32I-NEXT: mv a0, s1 |
| ; RV32I-NEXT: call __modsi3@plt |
| ; RV32I-NEXT: mv s1, a0 |
| ; RV32I-NEXT: lui a0, 1 |
| ; RV32I-NEXT: addi a1, a0, 1327 |
| ; RV32I-NEXT: mv a0, s0 |
| ; RV32I-NEXT: call __modsi3@plt |
| ; RV32I-NEXT: sh a0, 6(s2) |
| ; RV32I-NEXT: sh s1, 4(s2) |
| ; RV32I-NEXT: sh s3, 2(s2) |
| ; RV32I-NEXT: sh zero, 0(s2) |
| ; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: addi sp, sp, 32 |
| ; RV32I-NEXT: ret |
| ; |
| ; RV32IM-LABEL: dont_fold_srem_one: |
| ; RV32IM: # %bb.0: |
| ; RV32IM-NEXT: lh a2, 12(a1) |
| ; RV32IM-NEXT: lh a3, 4(a1) |
| ; RV32IM-NEXT: lh a1, 8(a1) |
| ; RV32IM-NEXT: lui a4, 820904 |
| ; RV32IM-NEXT: addi a4, a4, -1903 |
| ; RV32IM-NEXT: mulh a4, a3, a4 |
| ; RV32IM-NEXT: add a4, a4, a3 |
| ; RV32IM-NEXT: srli a5, a4, 31 |
| ; RV32IM-NEXT: srli a4, a4, 9 |
| ; RV32IM-NEXT: add a4, a4, a5 |
| ; RV32IM-NEXT: li a5, 654 |
| ; RV32IM-NEXT: mul a4, a4, a5 |
| ; RV32IM-NEXT: sub a3, a3, a4 |
| ; RV32IM-NEXT: lui a4, 729444 |
| ; RV32IM-NEXT: addi a4, a4, 713 |
| ; RV32IM-NEXT: mulh a4, a1, a4 |
| ; RV32IM-NEXT: add a4, a4, a1 |
| ; RV32IM-NEXT: srli a5, a4, 31 |
| ; RV32IM-NEXT: srli a4, a4, 4 |
| ; RV32IM-NEXT: add a4, a4, a5 |
| ; RV32IM-NEXT: li a5, 23 |
| ; RV32IM-NEXT: mul a4, a4, a5 |
| ; RV32IM-NEXT: sub a1, a1, a4 |
| ; RV32IM-NEXT: lui a4, 395996 |
| ; RV32IM-NEXT: addi a4, a4, -2009 |
| ; RV32IM-NEXT: mulh a4, a2, a4 |
| ; RV32IM-NEXT: srli a5, a4, 31 |
| ; RV32IM-NEXT: srli a4, a4, 11 |
| ; RV32IM-NEXT: add a4, a4, a5 |
| ; RV32IM-NEXT: lui a5, 1 |
| ; RV32IM-NEXT: addi a5, a5, 1327 |
| ; RV32IM-NEXT: mul a4, a4, a5 |
| ; RV32IM-NEXT: sub a2, a2, a4 |
| ; RV32IM-NEXT: sh zero, 0(a0) |
| ; RV32IM-NEXT: sh a2, 6(a0) |
| ; RV32IM-NEXT: sh a1, 4(a0) |
| ; RV32IM-NEXT: sh a3, 2(a0) |
| ; RV32IM-NEXT: ret |
| ; |
| ; RV64I-LABEL: dont_fold_srem_one: |
| ; RV64I: # %bb.0: |
| ; RV64I-NEXT: addi sp, sp, -48 |
| ; RV64I-NEXT: sd ra, 40(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: sd s0, 32(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: sd s1, 24(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: sd s2, 16(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: sd s3, 8(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: lh s0, 24(a1) |
| ; RV64I-NEXT: lh s1, 16(a1) |
| ; RV64I-NEXT: lh a2, 8(a1) |
| ; RV64I-NEXT: mv s2, a0 |
| ; RV64I-NEXT: li a1, 654 |
| ; RV64I-NEXT: mv a0, a2 |
| ; RV64I-NEXT: call __moddi3@plt |
| ; RV64I-NEXT: mv s3, a0 |
| ; RV64I-NEXT: li a1, 23 |
| ; RV64I-NEXT: mv a0, s1 |
| ; RV64I-NEXT: call __moddi3@plt |
| ; RV64I-NEXT: mv s1, a0 |
| ; RV64I-NEXT: lui a0, 1 |
| ; RV64I-NEXT: addiw a1, a0, 1327 |
| ; RV64I-NEXT: mv a0, s0 |
| ; RV64I-NEXT: call __moddi3@plt |
| ; RV64I-NEXT: sh a0, 6(s2) |
| ; RV64I-NEXT: sh s1, 4(s2) |
| ; RV64I-NEXT: sh s3, 2(s2) |
| ; RV64I-NEXT: sh zero, 0(s2) |
| ; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: ld s2, 16(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: ld s3, 8(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: addi sp, sp, 48 |
| ; RV64I-NEXT: ret |
| ; |
| ; RV64IM-LABEL: dont_fold_srem_one: |
| ; RV64IM: # %bb.0: |
| ; RV64IM-NEXT: lh a2, 16(a1) |
| ; RV64IM-NEXT: lui a3, %hi(.LCPI4_0) |
| ; RV64IM-NEXT: ld a3, %lo(.LCPI4_0)(a3) |
| ; RV64IM-NEXT: lh a4, 24(a1) |
| ; RV64IM-NEXT: lh a1, 8(a1) |
| ; RV64IM-NEXT: mulh a3, a2, a3 |
| ; RV64IM-NEXT: add a3, a3, a2 |
| ; RV64IM-NEXT: srli a5, a3, 63 |
| ; RV64IM-NEXT: srli a3, a3, 4 |
| ; RV64IM-NEXT: add a3, a3, a5 |
| ; RV64IM-NEXT: lui a5, %hi(.LCPI4_1) |
| ; RV64IM-NEXT: ld a5, %lo(.LCPI4_1)(a5) |
| ; RV64IM-NEXT: li a6, 23 |
| ; RV64IM-NEXT: mul a3, a3, a6 |
| ; RV64IM-NEXT: subw a2, a2, a3 |
| ; RV64IM-NEXT: mulh a3, a1, a5 |
| ; RV64IM-NEXT: srli a5, a3, 63 |
| ; RV64IM-NEXT: srli a3, a3, 8 |
| ; RV64IM-NEXT: add a3, a3, a5 |
| ; RV64IM-NEXT: lui a5, %hi(.LCPI4_2) |
| ; RV64IM-NEXT: ld a5, %lo(.LCPI4_2)(a5) |
| ; RV64IM-NEXT: li a6, 654 |
| ; RV64IM-NEXT: mul a3, a3, a6 |
| ; RV64IM-NEXT: subw a1, a1, a3 |
| ; RV64IM-NEXT: mulh a3, a4, a5 |
| ; RV64IM-NEXT: srli a5, a3, 63 |
| ; RV64IM-NEXT: srli a3, a3, 11 |
| ; RV64IM-NEXT: add a3, a3, a5 |
| ; RV64IM-NEXT: lui a5, 1 |
| ; RV64IM-NEXT: addiw a5, a5, 1327 |
| ; RV64IM-NEXT: mul a3, a3, a5 |
| ; RV64IM-NEXT: subw a4, a4, a3 |
| ; RV64IM-NEXT: sh zero, 0(a0) |
| ; RV64IM-NEXT: sh a4, 6(a0) |
| ; RV64IM-NEXT: sh a1, 2(a0) |
| ; RV64IM-NEXT: sh a2, 4(a0) |
| ; RV64IM-NEXT: ret |
| %1 = srem <4 x i16> %x, <i16 1, i16 654, i16 23, i16 5423> |
| ret <4 x i16> %1 |
| } |
| |
| ; Don't fold if the divisor is 2^15 (i16 32768 wraps to -32768 = INT16_MIN, |
| ; so this lane is lowered with a shift/mask sequence, not a multiply or libcall). |
| define <4 x i16> @dont_fold_urem_i16_smax(<4 x i16> %x) nounwind { |
| ; NOTE: the RV32I/RV32IM/RV64I/RV64IM blocks below are autogenerated FileCheck |
| ; assertions (see the update_llc_test_checks.py note at the top of the file); |
| ; regenerate them with that script rather than editing by hand. |
| ; RV32I-LABEL: dont_fold_urem_i16_smax: |
| ; RV32I: # %bb.0: |
| ; RV32I-NEXT: addi sp, sp, -32 |
| ; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: lh a2, 4(a1) |
| ; RV32I-NEXT: mv s0, a0 |
| ; RV32I-NEXT: lh s1, 12(a1) |
| ; RV32I-NEXT: lh a0, 8(a1) |
| ; RV32I-NEXT: srli a1, a2, 17 |
| ; RV32I-NEXT: add a1, a2, a1 |
| ; RV32I-NEXT: lui a3, 8 |
| ; RV32I-NEXT: and a1, a1, a3 |
| ; RV32I-NEXT: sub s3, a2, a1 |
| ; RV32I-NEXT: li a1, 23 |
| ; RV32I-NEXT: call __modsi3@plt |
| ; RV32I-NEXT: mv s2, a0 |
| ; RV32I-NEXT: lui a0, 1 |
| ; RV32I-NEXT: addi a1, a0, 1327 |
| ; RV32I-NEXT: mv a0, s1 |
| ; RV32I-NEXT: call __modsi3@plt |
| ; RV32I-NEXT: sh a0, 6(s0) |
| ; RV32I-NEXT: sh s2, 4(s0) |
| ; RV32I-NEXT: sh zero, 0(s0) |
| ; RV32I-NEXT: sh s3, 2(s0) |
| ; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: addi sp, sp, 32 |
| ; RV32I-NEXT: ret |
| ; |
| ; RV32IM-LABEL: dont_fold_urem_i16_smax: |
| ; RV32IM: # %bb.0: |
| ; RV32IM-NEXT: lh a2, 4(a1) |
| ; RV32IM-NEXT: lh a3, 8(a1) |
| ; RV32IM-NEXT: lh a1, 12(a1) |
| ; RV32IM-NEXT: lui a4, 729444 |
| ; RV32IM-NEXT: addi a4, a4, 713 |
| ; RV32IM-NEXT: mulh a4, a3, a4 |
| ; RV32IM-NEXT: add a4, a4, a3 |
| ; RV32IM-NEXT: srli a5, a4, 31 |
| ; RV32IM-NEXT: srli a4, a4, 4 |
| ; RV32IM-NEXT: add a4, a4, a5 |
| ; RV32IM-NEXT: li a5, 23 |
| ; RV32IM-NEXT: mul a4, a4, a5 |
| ; RV32IM-NEXT: sub a3, a3, a4 |
| ; RV32IM-NEXT: lui a4, 395996 |
| ; RV32IM-NEXT: addi a4, a4, -2009 |
| ; RV32IM-NEXT: mulh a4, a1, a4 |
| ; RV32IM-NEXT: srli a5, a4, 31 |
| ; RV32IM-NEXT: srli a4, a4, 11 |
| ; RV32IM-NEXT: add a4, a4, a5 |
| ; RV32IM-NEXT: lui a5, 1 |
| ; RV32IM-NEXT: addi a5, a5, 1327 |
| ; RV32IM-NEXT: mul a4, a4, a5 |
| ; RV32IM-NEXT: sub a1, a1, a4 |
| ; RV32IM-NEXT: srli a4, a2, 17 |
| ; RV32IM-NEXT: add a4, a2, a4 |
| ; RV32IM-NEXT: lui a5, 8 |
| ; RV32IM-NEXT: and a4, a4, a5 |
| ; RV32IM-NEXT: sub a2, a2, a4 |
| ; RV32IM-NEXT: sh zero, 0(a0) |
| ; RV32IM-NEXT: sh a2, 2(a0) |
| ; RV32IM-NEXT: sh a1, 6(a0) |
| ; RV32IM-NEXT: sh a3, 4(a0) |
| ; RV32IM-NEXT: ret |
| ; |
| ; RV64I-LABEL: dont_fold_urem_i16_smax: |
| ; RV64I: # %bb.0: |
| ; RV64I-NEXT: addi sp, sp, -48 |
| ; RV64I-NEXT: sd ra, 40(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: sd s0, 32(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: sd s1, 24(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: sd s2, 16(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: sd s3, 8(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: lh a2, 8(a1) |
| ; RV64I-NEXT: mv s0, a0 |
| ; RV64I-NEXT: lh s1, 24(a1) |
| ; RV64I-NEXT: lh a0, 16(a1) |
| ; RV64I-NEXT: srli a1, a2, 49 |
| ; RV64I-NEXT: add a1, a2, a1 |
| ; RV64I-NEXT: lui a3, 8 |
| ; RV64I-NEXT: and a1, a1, a3 |
| ; RV64I-NEXT: subw s3, a2, a1 |
| ; RV64I-NEXT: li a1, 23 |
| ; RV64I-NEXT: call __moddi3@plt |
| ; RV64I-NEXT: mv s2, a0 |
| ; RV64I-NEXT: lui a0, 1 |
| ; RV64I-NEXT: addiw a1, a0, 1327 |
| ; RV64I-NEXT: mv a0, s1 |
| ; RV64I-NEXT: call __moddi3@plt |
| ; RV64I-NEXT: sh a0, 6(s0) |
| ; RV64I-NEXT: sh s2, 4(s0) |
| ; RV64I-NEXT: sh zero, 0(s0) |
| ; RV64I-NEXT: sh s3, 2(s0) |
| ; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: ld s2, 16(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: ld s3, 8(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: addi sp, sp, 48 |
| ; RV64I-NEXT: ret |
| ; |
| ; RV64IM-LABEL: dont_fold_urem_i16_smax: |
| ; RV64IM: # %bb.0: |
| ; RV64IM-NEXT: lh a2, 16(a1) |
| ; RV64IM-NEXT: lui a3, %hi(.LCPI5_0) |
| ; RV64IM-NEXT: ld a3, %lo(.LCPI5_0)(a3) |
| ; RV64IM-NEXT: lh a4, 24(a1) |
| ; RV64IM-NEXT: mulh a3, a2, a3 |
| ; RV64IM-NEXT: add a3, a3, a2 |
| ; RV64IM-NEXT: srli a5, a3, 63 |
| ; RV64IM-NEXT: srli a3, a3, 4 |
| ; RV64IM-NEXT: add a3, a3, a5 |
| ; RV64IM-NEXT: li a5, 23 |
| ; RV64IM-NEXT: lui a6, %hi(.LCPI5_1) |
| ; RV64IM-NEXT: ld a6, %lo(.LCPI5_1)(a6) |
| ; RV64IM-NEXT: mul a3, a3, a5 |
| ; RV64IM-NEXT: lh a1, 8(a1) |
| ; RV64IM-NEXT: subw a2, a2, a3 |
| ; RV64IM-NEXT: mulh a3, a4, a6 |
| ; RV64IM-NEXT: srli a5, a3, 63 |
| ; RV64IM-NEXT: srli a3, a3, 11 |
| ; RV64IM-NEXT: add a3, a3, a5 |
| ; RV64IM-NEXT: lui a5, 1 |
| ; RV64IM-NEXT: addiw a5, a5, 1327 |
| ; RV64IM-NEXT: mul a3, a3, a5 |
| ; RV64IM-NEXT: subw a4, a4, a3 |
| ; RV64IM-NEXT: srli a3, a1, 49 |
| ; RV64IM-NEXT: add a3, a1, a3 |
| ; RV64IM-NEXT: lui a5, 8 |
| ; RV64IM-NEXT: and a3, a3, a5 |
| ; RV64IM-NEXT: subw a1, a1, a3 |
| ; RV64IM-NEXT: sh zero, 0(a0) |
| ; RV64IM-NEXT: sh a1, 2(a0) |
| ; RV64IM-NEXT: sh a4, 6(a0) |
| ; RV64IM-NEXT: sh a2, 4(a0) |
| ; RV64IM-NEXT: ret |
| ; Lane 0 divides by 1, so all four configurations store zero directly |
| ; (sh zero); the 2^15 lane is lowered with a shift/add/and/sub sequence |
| ; (srli 17 on RV32 / srli 49 on RV64, mask lui 8 = 0x8000) instead of a |
| ; divide; lanes 23 and 5423 use __modsi3/__moddi3 libcalls without M, and |
| ; mulh-based magic-number sequences with M. |
| %1 = srem <4 x i16> %x, <i16 1, i16 32768, i16 23, i16 5423> |
| ret <4 x i16> %1 |
| } |
| |
| ; Don't fold i64 srem. |
| define <4 x i64> @dont_fold_srem_i64(<4 x i64> %x) nounwind { |
| ; NOTE: the RV32I/RV32IM/RV64I/RV64IM blocks below are autogenerated FileCheck |
| ; assertions (see the update_llc_test_checks.py note at the top of the file); |
| ; regenerate them with that script rather than editing by hand. |
| ; RV32I-LABEL: dont_fold_srem_i64: |
| ; RV32I: # %bb.0: |
| ; RV32I-NEXT: addi sp, sp, -48 |
| ; RV32I-NEXT: sw ra, 44(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: sw s0, 40(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: sw s1, 36(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: sw s2, 32(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: sw s3, 28(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: sw s4, 24(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: sw s5, 20(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: sw s6, 16(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: sw s7, 12(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: sw s8, 8(sp) # 4-byte Folded Spill |
| ; RV32I-NEXT: lw s1, 24(a1) |
| ; RV32I-NEXT: lw s2, 28(a1) |
| ; RV32I-NEXT: lw s3, 16(a1) |
| ; RV32I-NEXT: lw s4, 20(a1) |
| ; RV32I-NEXT: lw s5, 8(a1) |
| ; RV32I-NEXT: lw s6, 12(a1) |
| ; RV32I-NEXT: lw a3, 0(a1) |
| ; RV32I-NEXT: lw a1, 4(a1) |
| ; RV32I-NEXT: mv s0, a0 |
| ; RV32I-NEXT: li a2, 1 |
| ; RV32I-NEXT: mv a0, a3 |
| ; RV32I-NEXT: li a3, 0 |
| ; RV32I-NEXT: call __moddi3@plt |
| ; RV32I-NEXT: mv s7, a0 |
| ; RV32I-NEXT: mv s8, a1 |
| ; RV32I-NEXT: li a2, 654 |
| ; RV32I-NEXT: mv a0, s5 |
| ; RV32I-NEXT: mv a1, s6 |
| ; RV32I-NEXT: li a3, 0 |
| ; RV32I-NEXT: call __moddi3@plt |
| ; RV32I-NEXT: mv s5, a0 |
| ; RV32I-NEXT: mv s6, a1 |
| ; RV32I-NEXT: li a2, 23 |
| ; RV32I-NEXT: mv a0, s3 |
| ; RV32I-NEXT: mv a1, s4 |
| ; RV32I-NEXT: li a3, 0 |
| ; RV32I-NEXT: call __moddi3@plt |
| ; RV32I-NEXT: mv s3, a0 |
| ; RV32I-NEXT: mv s4, a1 |
| ; RV32I-NEXT: lui a0, 1 |
| ; RV32I-NEXT: addi a2, a0, 1327 |
| ; RV32I-NEXT: mv a0, s1 |
| ; RV32I-NEXT: mv a1, s2 |
| ; RV32I-NEXT: li a3, 0 |
| ; RV32I-NEXT: call __moddi3@plt |
| ; RV32I-NEXT: sw a1, 28(s0) |
| ; RV32I-NEXT: sw a0, 24(s0) |
| ; RV32I-NEXT: sw s4, 20(s0) |
| ; RV32I-NEXT: sw s3, 16(s0) |
| ; RV32I-NEXT: sw s6, 12(s0) |
| ; RV32I-NEXT: sw s5, 8(s0) |
| ; RV32I-NEXT: sw s8, 4(s0) |
| ; RV32I-NEXT: sw s7, 0(s0) |
| ; RV32I-NEXT: lw ra, 44(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: lw s0, 40(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: lw s1, 36(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: lw s2, 32(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: lw s3, 28(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: lw s4, 24(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: lw s5, 20(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: lw s6, 16(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: lw s7, 12(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: lw s8, 8(sp) # 4-byte Folded Reload |
| ; RV32I-NEXT: addi sp, sp, 48 |
| ; RV32I-NEXT: ret |
| ; |
| ; RV32IM-LABEL: dont_fold_srem_i64: |
| ; RV32IM: # %bb.0: |
| ; RV32IM-NEXT: addi sp, sp, -48 |
| ; RV32IM-NEXT: sw ra, 44(sp) # 4-byte Folded Spill |
| ; RV32IM-NEXT: sw s0, 40(sp) # 4-byte Folded Spill |
| ; RV32IM-NEXT: sw s1, 36(sp) # 4-byte Folded Spill |
| ; RV32IM-NEXT: sw s2, 32(sp) # 4-byte Folded Spill |
| ; RV32IM-NEXT: sw s3, 28(sp) # 4-byte Folded Spill |
| ; RV32IM-NEXT: sw s4, 24(sp) # 4-byte Folded Spill |
| ; RV32IM-NEXT: sw s5, 20(sp) # 4-byte Folded Spill |
| ; RV32IM-NEXT: sw s6, 16(sp) # 4-byte Folded Spill |
| ; RV32IM-NEXT: sw s7, 12(sp) # 4-byte Folded Spill |
| ; RV32IM-NEXT: sw s8, 8(sp) # 4-byte Folded Spill |
| ; RV32IM-NEXT: lw s1, 24(a1) |
| ; RV32IM-NEXT: lw s2, 28(a1) |
| ; RV32IM-NEXT: lw s3, 16(a1) |
| ; RV32IM-NEXT: lw s4, 20(a1) |
| ; RV32IM-NEXT: lw s5, 8(a1) |
| ; RV32IM-NEXT: lw s6, 12(a1) |
| ; RV32IM-NEXT: lw a3, 0(a1) |
| ; RV32IM-NEXT: lw a1, 4(a1) |
| ; RV32IM-NEXT: mv s0, a0 |
| ; RV32IM-NEXT: li a2, 1 |
| ; RV32IM-NEXT: mv a0, a3 |
| ; RV32IM-NEXT: li a3, 0 |
| ; RV32IM-NEXT: call __moddi3@plt |
| ; RV32IM-NEXT: mv s7, a0 |
| ; RV32IM-NEXT: mv s8, a1 |
| ; RV32IM-NEXT: li a2, 654 |
| ; RV32IM-NEXT: mv a0, s5 |
| ; RV32IM-NEXT: mv a1, s6 |
| ; RV32IM-NEXT: li a3, 0 |
| ; RV32IM-NEXT: call __moddi3@plt |
| ; RV32IM-NEXT: mv s5, a0 |
| ; RV32IM-NEXT: mv s6, a1 |
| ; RV32IM-NEXT: li a2, 23 |
| ; RV32IM-NEXT: mv a0, s3 |
| ; RV32IM-NEXT: mv a1, s4 |
| ; RV32IM-NEXT: li a3, 0 |
| ; RV32IM-NEXT: call __moddi3@plt |
| ; RV32IM-NEXT: mv s3, a0 |
| ; RV32IM-NEXT: mv s4, a1 |
| ; RV32IM-NEXT: lui a0, 1 |
| ; RV32IM-NEXT: addi a2, a0, 1327 |
| ; RV32IM-NEXT: mv a0, s1 |
| ; RV32IM-NEXT: mv a1, s2 |
| ; RV32IM-NEXT: li a3, 0 |
| ; RV32IM-NEXT: call __moddi3@plt |
| ; RV32IM-NEXT: sw a1, 28(s0) |
| ; RV32IM-NEXT: sw a0, 24(s0) |
| ; RV32IM-NEXT: sw s4, 20(s0) |
| ; RV32IM-NEXT: sw s3, 16(s0) |
| ; RV32IM-NEXT: sw s6, 12(s0) |
| ; RV32IM-NEXT: sw s5, 8(s0) |
| ; RV32IM-NEXT: sw s8, 4(s0) |
| ; RV32IM-NEXT: sw s7, 0(s0) |
| ; RV32IM-NEXT: lw ra, 44(sp) # 4-byte Folded Reload |
| ; RV32IM-NEXT: lw s0, 40(sp) # 4-byte Folded Reload |
| ; RV32IM-NEXT: lw s1, 36(sp) # 4-byte Folded Reload |
| ; RV32IM-NEXT: lw s2, 32(sp) # 4-byte Folded Reload |
| ; RV32IM-NEXT: lw s3, 28(sp) # 4-byte Folded Reload |
| ; RV32IM-NEXT: lw s4, 24(sp) # 4-byte Folded Reload |
| ; RV32IM-NEXT: lw s5, 20(sp) # 4-byte Folded Reload |
| ; RV32IM-NEXT: lw s6, 16(sp) # 4-byte Folded Reload |
| ; RV32IM-NEXT: lw s7, 12(sp) # 4-byte Folded Reload |
| ; RV32IM-NEXT: lw s8, 8(sp) # 4-byte Folded Reload |
| ; RV32IM-NEXT: addi sp, sp, 48 |
| ; RV32IM-NEXT: ret |
| ; |
| ; RV64I-LABEL: dont_fold_srem_i64: |
| ; RV64I: # %bb.0: |
| ; RV64I-NEXT: addi sp, sp, -48 |
| ; RV64I-NEXT: sd ra, 40(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: sd s0, 32(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: sd s1, 24(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: sd s2, 16(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: sd s3, 8(sp) # 8-byte Folded Spill |
| ; RV64I-NEXT: ld s0, 24(a1) |
| ; RV64I-NEXT: ld s1, 16(a1) |
| ; RV64I-NEXT: ld a2, 8(a1) |
| ; RV64I-NEXT: mv s2, a0 |
| ; RV64I-NEXT: li a1, 654 |
| ; RV64I-NEXT: mv a0, a2 |
| ; RV64I-NEXT: call __moddi3@plt |
| ; RV64I-NEXT: mv s3, a0 |
| ; RV64I-NEXT: li a1, 23 |
| ; RV64I-NEXT: mv a0, s1 |
| ; RV64I-NEXT: call __moddi3@plt |
| ; RV64I-NEXT: mv s1, a0 |
| ; RV64I-NEXT: lui a0, 1 |
| ; RV64I-NEXT: addiw a1, a0, 1327 |
| ; RV64I-NEXT: mv a0, s0 |
| ; RV64I-NEXT: call __moddi3@plt |
| ; RV64I-NEXT: sd a0, 24(s2) |
| ; RV64I-NEXT: sd s1, 16(s2) |
| ; RV64I-NEXT: sd s3, 8(s2) |
| ; RV64I-NEXT: sd zero, 0(s2) |
| ; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: ld s2, 16(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: ld s3, 8(sp) # 8-byte Folded Reload |
| ; RV64I-NEXT: addi sp, sp, 48 |
| ; RV64I-NEXT: ret |
| ; |
| ; RV64IM-LABEL: dont_fold_srem_i64: |
| ; RV64IM: # %bb.0: |
| ; RV64IM-NEXT: ld a2, 16(a1) |
| ; RV64IM-NEXT: lui a3, %hi(.LCPI6_0) |
| ; RV64IM-NEXT: ld a3, %lo(.LCPI6_0)(a3) |
| ; RV64IM-NEXT: ld a4, 24(a1) |
| ; RV64IM-NEXT: ld a1, 8(a1) |
| ; RV64IM-NEXT: mulh a3, a2, a3 |
| ; RV64IM-NEXT: add a3, a3, a2 |
| ; RV64IM-NEXT: srli a5, a3, 63 |
| ; RV64IM-NEXT: srai a3, a3, 4 |
| ; RV64IM-NEXT: add a3, a3, a5 |
| ; RV64IM-NEXT: lui a5, %hi(.LCPI6_1) |
| ; RV64IM-NEXT: ld a5, %lo(.LCPI6_1)(a5) |
| ; RV64IM-NEXT: li a6, 23 |
| ; RV64IM-NEXT: mul a3, a3, a6 |
| ; RV64IM-NEXT: sub a2, a2, a3 |
| ; RV64IM-NEXT: mulh a3, a1, a5 |
| ; RV64IM-NEXT: srli a5, a3, 63 |
| ; RV64IM-NEXT: srai a3, a3, 8 |
| ; RV64IM-NEXT: add a3, a3, a5 |
| ; RV64IM-NEXT: lui a5, %hi(.LCPI6_2) |
| ; RV64IM-NEXT: ld a5, %lo(.LCPI6_2)(a5) |
| ; RV64IM-NEXT: li a6, 654 |
| ; RV64IM-NEXT: mul a3, a3, a6 |
| ; RV64IM-NEXT: sub a1, a1, a3 |
| ; RV64IM-NEXT: mulh a3, a4, a5 |
| ; RV64IM-NEXT: srli a5, a3, 63 |
| ; RV64IM-NEXT: srai a3, a3, 11 |
| ; RV64IM-NEXT: add a3, a3, a5 |
| ; RV64IM-NEXT: lui a5, 1 |
| ; RV64IM-NEXT: addiw a5, a5, 1327 |
| ; RV64IM-NEXT: mul a3, a3, a5 |
| ; RV64IM-NEXT: sub a4, a4, a3 |
| ; RV64IM-NEXT: sd zero, 0(a0) |
| ; RV64IM-NEXT: sd a4, 24(a0) |
| ; RV64IM-NEXT: sd a1, 8(a0) |
| ; RV64IM-NEXT: sd a2, 16(a0) |
| ; RV64IM-NEXT: ret |
| ; Divisors are <1, 654, 23, 5423>. Both RV32 configurations emit one |
| ; __moddi3 libcall per lane — including the divisor-1 lane — since there is |
| ; no 64-bit mulh on RV32. RV64I stores zero directly for the divisor-1 lane |
| ; and calls __moddi3 for the rest; RV64IM uses mulh with magic constants |
| ; loaded from the constant pool (.LCPI6_0/1/2). |
| %1 = srem <4 x i64> %x, <i64 1, i64 654, i64 23, i64 5423> |
| ret <4 x i64> %1 |
| } |