# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -verify-machineinstrs -mtriple aarch64-unknown-unknown -run-pass=instruction-select %s -o - | FileCheck %s
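#
# Test instruction selection for G_INSERT_VECTOR_ELT on AArch64. The register
# bank of the inserted element determines the selected instruction: GPR
# elements map to INSvi*gpr, while FPR elements map to INSvi*lane after the
# scalar is placed in an fpr128 register via INSERT_SUBREG of an IMPLICIT_DEF.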
---
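# Inserting a GPR element (s16 truncated from $w0) into a <8 x s16> vector
# should select INSvi16gpr.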
name: v8s16_gpr
alignment: 4
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $q1, $w0
    ; CHECK-LABEL: name: v8s16_gpr
    ; CHECK: liveins: $q1, $w0
    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
    ; CHECK: [[INSvi16gpr:%[0-9]+]]:fpr128 = INSvi16gpr [[COPY1]], 1, [[COPY]]
    ; CHECK: $q0 = COPY [[INSvi16gpr]]
    ; CHECK: RET_ReallyLR implicit $q0
    %0:gpr(s32) = COPY $w0
    %trunc:gpr(s16) = G_TRUNC %0
    %1:fpr(<8 x s16>) = COPY $q1
    %3:gpr(s32) = G_CONSTANT i32 1
    %2:fpr(<8 x s16>) = G_INSERT_VECTOR_ELT %1, %trunc(s16), %3(s32)
    $q0 = COPY %2(<8 x s16>)
    RET_ReallyLR implicit $q0
...
---
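# Inserting an FPR s16 element into a <8 x s16> vector should select
# INSvi16lane, with the scalar first widened to fpr128 via INSERT_SUBREG.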
name: v8s16_fpr
alignment: 4
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $q1, $h0
    ; CHECK-LABEL: name: v8s16_fpr
    ; CHECK: liveins: $q1, $h0
    ; CHECK: [[COPY:%[0-9]+]]:fpr16 = COPY $h0
    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
    ; CHECK: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[COPY]], %subreg.hsub
    ; CHECK: [[INSvi16lane:%[0-9]+]]:fpr128 = INSvi16lane [[COPY1]], 1, [[INSERT_SUBREG]], 0
    ; CHECK: $q0 = COPY [[INSvi16lane]]
    ; CHECK: RET_ReallyLR implicit $q0
    %0:fpr(s16) = COPY $h0
    %1:fpr(<8 x s16>) = COPY $q1
    %3:gpr(s32) = G_CONSTANT i32 1
    %2:fpr(<8 x s16>) = G_INSERT_VECTOR_ELT %1, %0(s16), %3(s32)
    $q0 = COPY %2(<8 x s16>)
    RET_ReallyLR implicit $q0
...
---
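# Inserting an FPR s32 element into a <4 x s32> vector should select INSvi32lane.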
name: v4s32_fpr
alignment: 4
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $q1, $s0
    ; CHECK-LABEL: name: v4s32_fpr
    ; CHECK: liveins: $q1, $s0
    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
    ; CHECK: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[COPY]], %subreg.ssub
    ; CHECK: [[INSvi32lane:%[0-9]+]]:fpr128 = INSvi32lane [[COPY1]], 1, [[INSERT_SUBREG]], 0
    ; CHECK: $q0 = COPY [[INSvi32lane]]
    ; CHECK: RET_ReallyLR implicit $q0
    %0:fpr(s32) = COPY $s0
    %1:fpr(<4 x s32>) = COPY $q1
    %3:gpr(s32) = G_CONSTANT i32 1
    %2:fpr(<4 x s32>) = G_INSERT_VECTOR_ELT %1, %0(s32), %3(s32)
    $q0 = COPY %2(<4 x s32>)
    RET_ReallyLR implicit $q0
...
---
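# Inserting a GPR s32 element into a <4 x s32> vector should select INSvi32gpr.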
name: v4s32_gpr
alignment: 4
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $q0, $w0
    ; CHECK-LABEL: name: v4s32_gpr
    ; CHECK: liveins: $q0, $w0
    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
    ; CHECK: [[INSvi32gpr:%[0-9]+]]:fpr128 = INSvi32gpr [[COPY1]], 1, [[COPY]]
    ; CHECK: $q0 = COPY [[INSvi32gpr]]
    ; CHECK: RET_ReallyLR implicit $q0
    %0:gpr(s32) = COPY $w0
    %1:fpr(<4 x s32>) = COPY $q0
    %3:gpr(s32) = G_CONSTANT i32 1
    %2:fpr(<4 x s32>) = G_INSERT_VECTOR_ELT %1, %0(s32), %3(s32)
    $q0 = COPY %2(<4 x s32>)
    RET_ReallyLR implicit $q0
...
---
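# Inserting an FPR s64 element into a <2 x s64> vector should select INSvi64lane.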
name: v2s64_fpr
alignment: 4
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $d0, $q1
    ; CHECK-LABEL: name: v2s64_fpr
    ; CHECK: liveins: $d0, $q1
    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
    ; CHECK: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[COPY]], %subreg.dsub
    ; CHECK: [[INSvi64lane:%[0-9]+]]:fpr128 = INSvi64lane [[COPY1]], 1, [[INSERT_SUBREG]], 0
    ; CHECK: $q0 = COPY [[INSvi64lane]]
    ; CHECK: RET_ReallyLR implicit $q0
    %0:fpr(s64) = COPY $d0
    %1:fpr(<2 x s64>) = COPY $q1
    %3:gpr(s32) = G_CONSTANT i32 1
    %2:fpr(<2 x s64>) = G_INSERT_VECTOR_ELT %1, %0(s64), %3(s32)
    $q0 = COPY %2(<2 x s64>)
    RET_ReallyLR implicit $q0
...
---
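# Inserting a GPR s64 element into lane 0 of a <2 x s64> vector should select
# INSvi64gpr.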
name: v2s64_gpr
alignment: 4
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $q0, $x0
    ; CHECK-LABEL: name: v2s64_gpr
    ; CHECK: liveins: $q0, $x0
    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
    ; CHECK: [[INSvi64gpr:%[0-9]+]]:fpr128 = INSvi64gpr [[COPY1]], 0, [[COPY]]
    ; CHECK: $q0 = COPY [[INSvi64gpr]]
    ; CHECK: RET_ReallyLR implicit $q0
    %0:gpr(s64) = COPY $x0
    %1:fpr(<2 x s64>) = COPY $q0
    %3:gpr(s32) = G_CONSTANT i32 0
    %2:fpr(<2 x s64>) = G_INSERT_VECTOR_ELT %1, %0(s64), %3(s32)
    $q0 = COPY %2(<2 x s64>)
    RET_ReallyLR implicit $q0
...
---
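# For a 64-bit <2 x s32> destination, the source vector is widened to fpr128
# via INSERT_SUBREG, the insert is performed with INSvi32lane, and the result
# is copied back out of the dsub subregister.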
name: v2s32_fpr
alignment: 4
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $d1, $s0
    ; CHECK-LABEL: name: v2s32_fpr
    ; CHECK: liveins: $d1, $s0
    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
    ; CHECK: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[COPY1]], %subreg.dsub
    ; CHECK: [[DEF1:%[0-9]+]]:fpr128 = IMPLICIT_DEF
    ; CHECK: [[INSERT_SUBREG1:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF1]], [[COPY]], %subreg.ssub
    ; CHECK: [[INSvi32lane:%[0-9]+]]:fpr128 = INSvi32lane [[INSERT_SUBREG]], 1, [[INSERT_SUBREG1]], 0
    ; CHECK: [[COPY2:%[0-9]+]]:fpr64 = COPY [[INSvi32lane]].dsub
    ; CHECK: $d0 = COPY [[COPY2]]
    ; CHECK: RET_ReallyLR implicit $d0
    %0:fpr(s32) = COPY $s0
    %1:fpr(<2 x s32>) = COPY $d1
    %3:gpr(s32) = G_CONSTANT i32 1
    %2:fpr(<2 x s32>) = G_INSERT_VECTOR_ELT %1, %0(s32), %3(s32)
    $d0 = COPY %2(<2 x s32>)
    RET_ReallyLR implicit $d0
...
---
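# Same widening of the <2 x s32> destination as above, but a GPR element
# selects INSvi32gpr.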
name: v2s32_gpr
alignment: 4
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $d0, $w0
    ; CHECK-LABEL: name: v2s32_gpr
    ; CHECK: liveins: $d0, $w0
    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
    ; CHECK: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[COPY1]], %subreg.dsub
    ; CHECK: [[INSvi32gpr:%[0-9]+]]:fpr128 = INSvi32gpr [[INSERT_SUBREG]], 1, [[COPY]]
    ; CHECK: [[COPY2:%[0-9]+]]:fpr64 = COPY [[INSvi32gpr]].dsub
    ; CHECK: $d0 = COPY [[COPY2]]
    ; CHECK: RET_ReallyLR implicit $d0
    %0:gpr(s32) = COPY $w0
    %1:fpr(<2 x s32>) = COPY $d0
    %3:gpr(s32) = G_CONSTANT i32 1
    %2:fpr(<2 x s32>) = G_INSERT_VECTOR_ELT %1, %0(s32), %3(s32)
    $d0 = COPY %2(<2 x s32>)
    RET_ReallyLR implicit $d0
...