# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc %s -verify-machineinstrs -O0 -run-pass=legalizer -mtriple aarch64-unknown-unknown -o - | FileCheck %s
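# Test that the legalizer lowers G_SELECT with vector or scalar conditions
# into a bitwise blend: (true_val & mask) | (false_val & ~mask).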
...
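# The condition is already a <2 x s1> vector here: the compare result is
# sign-extended in place (G_SHL/G_ASHR by 63) to form a full-width lane mask.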
---
name: v2s64
alignment: 4
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $q0, $q1
    ; CHECK-LABEL: name: v2s64
    ; CHECK: liveins: $q0, $q1
    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64)
    ; CHECK: [[ICMP:%[0-9]+]]:_(<2 x s64>) = G_ICMP intpred(sgt), [[COPY]](<2 x s64>), [[BUILD_VECTOR]]
    ; CHECK: [[COPY2:%[0-9]+]]:_(<2 x s64>) = COPY [[ICMP]](<2 x s64>)
    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 63
    ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C1]](s64), [[C1]](s64)
    ; CHECK: [[SHL:%[0-9]+]]:_(<2 x s64>) = G_SHL [[COPY2]], [[BUILD_VECTOR1]](<2 x s64>)
    ; CHECK: [[ASHR:%[0-9]+]]:_(<2 x s64>) = G_ASHR [[SHL]], [[BUILD_VECTOR1]](<2 x s64>)
    ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
    ; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C2]](s64), [[C2]](s64)
    ; CHECK: [[XOR:%[0-9]+]]:_(<2 x s64>) = G_XOR [[ASHR]], [[BUILD_VECTOR2]]
    ; CHECK: [[AND:%[0-9]+]]:_(<2 x s64>) = G_AND [[COPY1]], [[ASHR]]
    ; CHECK: [[AND1:%[0-9]+]]:_(<2 x s64>) = G_AND [[COPY]], [[XOR]]
    ; CHECK: [[OR:%[0-9]+]]:_(<2 x s64>) = G_OR [[AND]], [[AND1]]
    ; CHECK: $q0 = COPY [[OR]](<2 x s64>)
    ; CHECK: RET_ReallyLR implicit $q0
    %0:_(<2 x s64>) = COPY $q0
    %1:_(<2 x s64>) = COPY $q1
    %3:_(s64) = G_CONSTANT i64 0
    %2:_(<2 x s64>) = G_BUILD_VECTOR %3(s64), %3(s64)
    %4:_(<2 x s1>) = G_ICMP intpred(sgt), %0(<2 x s64>), %2
    %5:_(<2 x s64>) = G_SELECT %4(<2 x s1>), %1, %0
    $q0 = COPY %5(<2 x s64>)
    RET_ReallyLR implicit $q0
...
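# Same lowering for <2 x s32>; the mask sign-extension shifts by 31.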
---
name: v2s32
alignment: 4
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $d0, $d1
    ; CHECK-LABEL: name: v2s32
    ; CHECK: liveins: $d0, $d1
    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $d1
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32)
    ; CHECK: [[ICMP:%[0-9]+]]:_(<2 x s32>) = G_ICMP intpred(sgt), [[COPY]](<2 x s32>), [[BUILD_VECTOR]]
    ; CHECK: [[COPY2:%[0-9]+]]:_(<2 x s32>) = COPY [[ICMP]](<2 x s32>)
    ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
    ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[C1]](s32), [[C1]](s32)
    ; CHECK: [[SHL:%[0-9]+]]:_(<2 x s32>) = G_SHL [[COPY2]], [[BUILD_VECTOR1]](<2 x s32>)
    ; CHECK: [[ASHR:%[0-9]+]]:_(<2 x s32>) = G_ASHR [[SHL]], [[BUILD_VECTOR1]](<2 x s32>)
    ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
    ; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[C2]](s32), [[C2]](s32)
    ; CHECK: [[XOR:%[0-9]+]]:_(<2 x s32>) = G_XOR [[ASHR]], [[BUILD_VECTOR2]]
    ; CHECK: [[AND:%[0-9]+]]:_(<2 x s32>) = G_AND [[COPY1]], [[ASHR]]
    ; CHECK: [[AND1:%[0-9]+]]:_(<2 x s32>) = G_AND [[COPY]], [[XOR]]
    ; CHECK: [[OR:%[0-9]+]]:_(<2 x s32>) = G_OR [[AND]], [[AND1]]
    ; CHECK: $d0 = COPY [[OR]](<2 x s32>)
    ; CHECK: RET_ReallyLR implicit $d0
    %0:_(<2 x s32>) = COPY $d0
    %1:_(<2 x s32>) = COPY $d1
    %3:_(s32) = G_CONSTANT i32 0
    %2:_(<2 x s32>) = G_BUILD_VECTOR %3(s32), %3(s32)
    %4:_(<2 x s1>) = G_ICMP intpred(sgt), %0(<2 x s32>), %2
    %5:_(<2 x s32>) = G_SELECT %4(<2 x s1>), %1, %0
    $d0 = COPY %5(<2 x s32>)
    RET_ReallyLR implicit $d0
...
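# Same lowering for <16 x s8>; the mask sign-extension shifts by 7.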
---
name: v16s8
alignment: 4
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $q0, $q1
    ; CHECK-LABEL: name: v16s8
    ; CHECK: liveins: $q0, $q1
    ; CHECK: [[COPY:%[0-9]+]]:_(<16 x s8>) = COPY $q0
    ; CHECK: [[COPY1:%[0-9]+]]:_(<16 x s8>) = COPY $q1
    ; CHECK: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8)
    ; CHECK: [[ICMP:%[0-9]+]]:_(<16 x s8>) = G_ICMP intpred(sgt), [[COPY]](<16 x s8>), [[BUILD_VECTOR]]
    ; CHECK: [[COPY2:%[0-9]+]]:_(<16 x s8>) = COPY [[ICMP]](<16 x s8>)
    ; CHECK: [[C1:%[0-9]+]]:_(s8) = G_CONSTANT i8 7
    ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8)
    ; CHECK: [[SHL:%[0-9]+]]:_(<16 x s8>) = G_SHL [[COPY2]], [[BUILD_VECTOR1]](<16 x s8>)
    ; CHECK: [[ASHR:%[0-9]+]]:_(<16 x s8>) = G_ASHR [[SHL]], [[BUILD_VECTOR1]](<16 x s8>)
    ; CHECK: [[C2:%[0-9]+]]:_(s8) = G_CONSTANT i8 -1
    ; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8), [[C2]](s8)
    ; CHECK: [[XOR:%[0-9]+]]:_(<16 x s8>) = G_XOR [[ASHR]], [[BUILD_VECTOR2]]
    ; CHECK: [[AND:%[0-9]+]]:_(<16 x s8>) = G_AND [[COPY1]], [[ASHR]]
    ; CHECK: [[AND1:%[0-9]+]]:_(<16 x s8>) = G_AND [[COPY]], [[XOR]]
    ; CHECK: [[OR:%[0-9]+]]:_(<16 x s8>) = G_OR [[AND]], [[AND1]]
    ; CHECK: $q0 = COPY [[OR]](<16 x s8>)
    ; CHECK: RET_ReallyLR implicit $q0
    %0:_(<16 x s8>) = COPY $q0
    %1:_(<16 x s8>) = COPY $q1
    %3:_(s8) = G_CONSTANT i8 0
    %2:_(<16 x s8>) = G_BUILD_VECTOR %3(s8), %3(s8), %3(s8), %3(s8), %3(s8), %3(s8), %3(s8), %3(s8), %3(s8), %3(s8), %3(s8), %3(s8), %3(s8), %3(s8), %3(s8), %3(s8)
    %4:_(<16 x s1>) = G_ICMP intpred(sgt), %0(<16 x s8>), %2
    %5:_(<16 x s8>) = G_SELECT %4(<16 x s1>), %1, %0
    $q0 = COPY %5(<16 x s8>)
    RET_ReallyLR implicit $q0
...
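# A scalar s1 condition selecting between vectors is sign-extended with
# G_SEXT_INREG, splatted via G_INSERT_VECTOR_ELT + G_SHUFFLE_VECTOR, and then
# used as the blend mask.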
---
name: scalar_mask
alignment: 4
tracksRegLiveness: true
liveins:
  - { reg: '$w0' }
  - { reg: '$q0' }
body: |
  bb.1:
    liveins: $q0, $w0
    ; CHECK-LABEL: name: scalar_mask
    ; CHECK: liveins: $q0, $w0
    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
    ; CHECK: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $q0
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4100
    ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C1]](s32), [[C1]](s32), [[C1]](s32), [[C1]](s32)
    ; CHECK: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[COPY]](s32), [[C]]
    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ICMP]](s32)
    ; CHECK: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY2]], 1
    ; CHECK: [[DEF:%[0-9]+]]:_(<4 x s32>) = G_IMPLICIT_DEF
    ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
    ; CHECK: [[IVEC:%[0-9]+]]:_(<4 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[SEXT_INREG]](s32), [[C2]](s64)
    ; CHECK: [[SHUF:%[0-9]+]]:_(<4 x s32>) = G_SHUFFLE_VECTOR [[IVEC]](<4 x s32>), [[DEF]], shufflemask(0, 0, 0, 0)
    ; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
    ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C3]](s32), [[C3]](s32), [[C3]](s32), [[C3]](s32)
    ; CHECK: [[XOR:%[0-9]+]]:_(<4 x s32>) = G_XOR [[SHUF]], [[BUILD_VECTOR1]]
    ; CHECK: [[AND:%[0-9]+]]:_(<4 x s32>) = G_AND [[COPY1]], [[SHUF]]
    ; CHECK: [[AND1:%[0-9]+]]:_(<4 x s32>) = G_AND [[BUILD_VECTOR]], [[XOR]]
    ; CHECK: [[OR:%[0-9]+]]:_(<4 x s32>) = G_OR [[AND]], [[AND1]]
    ; CHECK: $q0 = COPY [[OR]](<4 x s32>)
    ; CHECK: RET_ReallyLR implicit $q0
    %0:_(s32) = COPY $w0
    %1:_(<4 x s32>) = COPY $q0
    %2:_(s32) = G_CONSTANT i32 4100
    %6:_(s32) = G_FCONSTANT float 0.000000e+00
    %5:_(<4 x s32>) = G_BUILD_VECTOR %6(s32), %6(s32), %6(s32), %6(s32)
    %3:_(s1) = G_ICMP intpred(eq), %0(s32), %2
    %4:_(<4 x s32>) = G_SELECT %3(s1), %1, %5
    $q0 = COPY %4(<4 x s32>)
    RET_ReallyLR implicit $q0
...