; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -march=mips -mcpu=mips32r2 -stop-before=expand-isel-pseudos < %s | FileCheck %s --check-prefix=MIPS
; RUN: llc -march=mips -mcpu=mips32r2 -mattr=+micromips -stop-before=expand-isel-pseudos < %s | FileCheck %s --check-prefix=MICROMIPS
; Test that the ISA-appropriate variants of the unaligned memory access
; instructions are selected up front, i.e. during instruction selection.
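; For plain MIPS32r2 the LWL/LWR and SWL/SWR pairs are expected; with
; +micromips the microMIPS variants (LWL_MM/LWR_MM, SWL_MM/SWR_MM) should be
; selected instead, as checked below.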
define void @g2(i32* %a, i32* %b) {
; MIPS-LABEL: name: g2
; MIPS: bb.0.entry:
; MIPS: liveins: $a0, $a1
; MIPS: [[COPY:%[0-9]+]]:gpr32 = COPY $a1
; MIPS: [[COPY1:%[0-9]+]]:gpr32 = COPY $a0
; MIPS: [[DEF:%[0-9]+]]:gpr32 = IMPLICIT_DEF
; MIPS: [[LWL:%[0-9]+]]:gpr32 = LWL [[COPY1]], 0, [[DEF]] :: (load 4 from %ir.a, align 1)
; MIPS: [[LWR:%[0-9]+]]:gpr32 = LWR [[COPY1]], 3, [[LWL]] :: (load 4 from %ir.a, align 1)
; MIPS: SWL [[LWR]], [[COPY]], 0 :: (store 4 into %ir.b, align 1)
; MIPS: SWR [[LWR]], [[COPY]], 3 :: (store 4 into %ir.b, align 1)
; MIPS: RetRA
; MICROMIPS-LABEL: name: g2
; MICROMIPS: bb.0.entry:
; MICROMIPS: liveins: $a0, $a1
; MICROMIPS: [[COPY:%[0-9]+]]:gpr32 = COPY $a1
; MICROMIPS: [[COPY1:%[0-9]+]]:gpr32 = COPY $a0
; MICROMIPS: [[DEF:%[0-9]+]]:gpr32 = IMPLICIT_DEF
; MICROMIPS: [[LWL_MM:%[0-9]+]]:gpr32 = LWL_MM [[COPY1]], 0, [[DEF]] :: (load 4 from %ir.a, align 1)
; MICROMIPS: [[LWR_MM:%[0-9]+]]:gpr32 = LWR_MM [[COPY1]], 3, [[LWL_MM]] :: (load 4 from %ir.a, align 1)
; MICROMIPS: SWL_MM [[LWR_MM]], [[COPY]], 0 :: (store 4 into %ir.b, align 1)
; MICROMIPS: SWR_MM [[LWR_MM]], [[COPY]], 3 :: (store 4 into %ir.b, align 1)
; MICROMIPS: RetRA
entry:
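  ; The align 1 on the i32 load and store below is what triggers the unaligned
  ; lowering (LWL/LWR and SWL/SWR pairs) checked above.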
%0 = load i32, i32* %a, align 1
store i32 %0, i32* %b, align 1
ret void
}