; blob: fbf5e2b658a9222b47a47cd9825beddb1311f421 [file] [log] [blame] [edit]
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN: | FileCheck %s -check-prefix=RV32I
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN: | FileCheck %s -check-prefix=RV64I
; For each of these examples, it is fewer instructions to narrow the load and
; then shift (which is an opportunity that can be exposed by doing the mask
; before the shift).
define ptr @narrow_to_lbu(ptr %a, ptr %b) {
; RV32I-LABEL: narrow_to_lbu:
; RV32I: # %bb.0:
; RV32I-NEXT: lbu a0, 0(a0)
; RV32I-NEXT: slli a0, a0, 4
; RV32I-NEXT: add a0, a1, a0
; RV32I-NEXT: ret
;
; RV64I-LABEL: narrow_to_lbu:
; RV64I: # %bb.0:
; RV64I-NEXT: lbu a0, 0(a0)
; RV64I-NEXT: slli a0, a0, 4
; RV64I-NEXT: add a0, a1, a0
; RV64I-NEXT: ret
; The mask 510 (0x1FE) applied after a shl-by-1 keeps only bits 1..8, i.e.
; only the low byte of the loaded i16 is live. The i16 load can therefore be
; narrowed to lbu, and the shl-by-1 folds with the GEP's scale-by-8
; (sizeof(double)) into a single slli by 4.
%1 = load i16, ptr %a, align 2
%2 = shl i16 %1, 1
%3 = and i16 %2, 510
%4 = zext nneg i16 %3 to i64
%5 = getelementptr inbounds double, ptr %b, i64 %4
ret ptr %5
}
define ptr @narrow_to_lhu(ptr %a, ptr %b) {
; RV32I-LABEL: narrow_to_lhu:
; RV32I: # %bb.0:
; RV32I-NEXT: lhu a0, 0(a0)
; RV32I-NEXT: slli a0, a0, 4
; RV32I-NEXT: add a0, a1, a0
; RV32I-NEXT: ret
;
; RV64I-LABEL: narrow_to_lhu:
; RV64I: # %bb.0:
; RV64I-NEXT: lhu a0, 0(a0)
; RV64I-NEXT: slli a0, a0, 4
; RV64I-NEXT: add a0, a1, a0
; RV64I-NEXT: ret
; The mask 131070 (0x1FFFE) applied after a shl-by-1 keeps only bits 1..16,
; i.e. only the low halfword of the loaded i32 is live. The i32 load can
; therefore be narrowed to lhu, and the shl-by-1 folds with the GEP's
; scale-by-8 (sizeof(double)) into a single slli by 4.
%1 = load i32, ptr %a, align 4
%2 = shl i32 %1, 1
%3 = and i32 %2, 131070
%4 = zext nneg i32 %3 to i64
%5 = getelementptr inbounds double, ptr %b, i64 %4
ret ptr %5
}
define ptr @narrow_to_lwu(ptr %a, ptr %b) {
; RV32I-LABEL: narrow_to_lwu:
; RV32I: # %bb.0:
; RV32I-NEXT: lw a0, 0(a0)
; RV32I-NEXT: slli a0, a0, 4
; RV32I-NEXT: add a0, a1, a0
; RV32I-NEXT: ret
;
; RV64I-LABEL: narrow_to_lwu:
; RV64I: # %bb.0:
; RV64I-NEXT: lwu a0, 0(a0)
; RV64I-NEXT: slli a0, a0, 4
; RV64I-NEXT: add a0, a1, a0
; RV64I-NEXT: ret
; The mask 8589934590 (0x1FFFFFFFE) applied after a shl-by-1 keeps only bits
; 1..32, i.e. only the low word of the loaded i64 is live. The i64 load can
; therefore be narrowed to lwu on RV64 (plain lw on RV32, where the high
; half of the i64 load is dead), and the shl-by-1 folds with the GEP's
; scale-by-8 (sizeof(double)) into a single slli by 4.
%1 = load i64, ptr %a, align 8
%2 = shl i64 %1, 1
%3 = and i64 %2, 8589934590
%4 = getelementptr inbounds double, ptr %b, i64 %3
ret ptr %4
}