# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect -regbankselect-fast -verify-machineinstrs -o - %s | FileCheck %s
# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect -regbankselect-greedy -verify-machineinstrs -o - %s | FileCheck %s
...
# Generate the 3 operand vector bitfield extract instructions for 32-bit
# operations only.
---
# 32-bit G_UBFX with all three operands already on the vgpr bank: the
# instruction is kept as a single vector op and no cross-bank copies are
# inserted (see CHECK lines).
name: test_ubfx_s32_vvv
legalized: true

body: |
  bb.0.entry:
    liveins: $vgpr0, $vgpr1, $vgpr2

    ; CHECK-LABEL: name: test_ubfx_s32_vvv
    ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY $vgpr2
    ; CHECK: [[UBFX:%[0-9]+]]:vgpr(s32) = G_UBFX [[COPY]], [[COPY1]](s32), [[COPY2]]
    ; CHECK: $vgpr0 = COPY [[UBFX]](s32)
    %0:_(s32) = COPY $vgpr0
    %1:_(s32) = COPY $vgpr1
    %2:_(s32) = COPY $vgpr2
    %3:_(s32) = G_UBFX %0, %1(s32), %2
    $vgpr0 = COPY %3(s32)
...
---
# Constant offset/width: the G_CONSTANTs are assigned the sgpr bank and then
# copied to vgpr so the vector G_UBFX can use them (see CHECK lines).
name: test_ubfx_s32_vii
legalized: true

body: |
  bb.0.entry:
    liveins: $vgpr0

    ; CHECK-LABEL: name: test_ubfx_s32_vii
    ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    ; CHECK: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 10
    ; CHECK: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 4
    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY [[C]](s32)
    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[C1]](s32)
    ; CHECK: [[UBFX:%[0-9]+]]:vgpr(s32) = G_UBFX [[COPY]], [[COPY1]](s32), [[COPY2]]
    ; CHECK: $vgpr0 = COPY [[UBFX]](s32)
    %0:_(s32) = COPY $vgpr0
    %1:_(s32) = G_CONSTANT i32 10
    %2:_(s32) = G_CONSTANT i32 4
    %3:_(s32) = G_UBFX %0, %1(s32), %2
    $vgpr0 = COPY %3(s32)
...
---
# Scalar offset/width with a vector source: the sgpr inputs are copied to
# vgpr before the single vector G_UBFX (see CHECK lines).
name: test_ubfx_s32_vss
legalized: true

body: |
  bb.0.entry:
    liveins: $vgpr0, $sgpr0, $sgpr1

    ; CHECK-LABEL: name: test_ubfx_s32_vss
    ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
    ; CHECK: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
    ; CHECK: [[COPY4:%[0-9]+]]:vgpr(s32) = COPY [[COPY2]](s32)
    ; CHECK: [[UBFX:%[0-9]+]]:vgpr(s32) = G_UBFX [[COPY]], [[COPY3]](s32), [[COPY4]]
    ; CHECK: $vgpr0 = COPY [[UBFX]](s32)
    %0:_(s32) = COPY $vgpr0
    %1:_(s32) = COPY $sgpr0
    %2:_(s32) = COPY $sgpr1
    %3:_(s32) = G_UBFX %0, %1(s32), %2
    $vgpr0 = COPY %3(s32)
...
# Expand to a sequence that implements the 64-bit bitfield extract using
# shifts and masks.
---
# 64-bit divergent case: expanded to G_LSHR by the offset, then
# G_SHL/G_LSHR by (64 - width) to mask off the high bits (see CHECK lines).
name: test_ubfx_s64_vvv
legalized: true

body: |
  bb.0.entry:
    liveins: $vgpr0_vgpr1, $vgpr2, $vgpr3

    ; CHECK-LABEL: name: test_ubfx_s64_vvv
    ; CHECK: [[COPY:%[0-9]+]]:vgpr(s64) = COPY $vgpr0_vgpr1
    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr2
    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY $vgpr3
    ; CHECK: [[LSHR:%[0-9]+]]:vgpr(s64) = G_LSHR [[COPY]], [[COPY1]](s32)
    ; CHECK: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[LSHR]](s64)
    ; CHECK: [[C:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 64
    ; CHECK: [[SUB:%[0-9]+]]:vgpr(s32) = G_SUB [[C]], [[COPY2]]
    ; CHECK: [[SHL:%[0-9]+]]:vgpr(s64) = G_SHL [[LSHR]], [[SUB]](s32)
    ; CHECK: [[LSHR1:%[0-9]+]]:vgpr(s64) = G_LSHR [[SHL]], [[SUB]](s32)
    ; CHECK: $vgpr0_vgpr1 = COPY %3:vgpr(s64)
    %0:_(s64) = COPY $vgpr0_vgpr1
    %1:_(s32) = COPY $vgpr2
    %2:_(s32) = COPY $vgpr3
    %3:_(s64) = G_UBFX %0, %1(s32), %2
    $vgpr0_vgpr1 = COPY %3(s64)
...
---
# Same shift-based expansion as the all-VGPR 64-bit case.
# NOTE(review): despite the "_vss" name, the offset/width below are copied
# from $vgpr0/$vgpr1 (which alias the s64 source pair), not from the
# $sgpr0/$sgpr1 liveins -- confirm this input is intentional.
name: test_ubfx_s64_vss
legalized: true

body: |
  bb.0.entry:
    liveins: $vgpr0_vgpr1, $sgpr0, $sgpr1

    ; CHECK-LABEL: name: test_ubfx_s64_vss
    ; CHECK: [[COPY:%[0-9]+]]:vgpr(s64) = COPY $vgpr0_vgpr1
    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
    ; CHECK: [[LSHR:%[0-9]+]]:vgpr(s64) = G_LSHR [[COPY]], [[COPY1]](s32)
    ; CHECK: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[LSHR]](s64)
    ; CHECK: [[C:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 64
    ; CHECK: [[SUB:%[0-9]+]]:vgpr(s32) = G_SUB [[C]], [[COPY2]]
    ; CHECK: [[SHL:%[0-9]+]]:vgpr(s64) = G_SHL [[LSHR]], [[SUB]](s32)
    ; CHECK: [[LSHR1:%[0-9]+]]:vgpr(s64) = G_LSHR [[SHL]], [[SUB]](s32)
    ; CHECK: $vgpr0_vgpr1 = COPY %3:vgpr(s64)
    %0:_(s64) = COPY $vgpr0_vgpr1
    %1:_(s32) = COPY $vgpr0
    %2:_(s32) = COPY $vgpr1
    %3:_(s64) = G_UBFX %0, %1(s32), %2
    $vgpr0_vgpr1 = COPY %3(s64)
...
# If the offset and width are constants, use the 32-bit bitfield extract,
# and merge to create a 64-bit result.
---
# Constant offset (31) and width (4): shift right by the offset, extract the
# field from the low half with a 32-bit G_UBFX, and merge with zero for the
# high half (see CHECK lines).
name: test_ubfx_s64_vii_small
legalized: true

body: |
  bb.0.entry:
    liveins: $vgpr0_vgpr1

    ; CHECK-LABEL: name: test_ubfx_s64_vii_small
    ; CHECK: [[COPY:%[0-9]+]]:vgpr(s64) = COPY $vgpr0_vgpr1
    ; CHECK: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 31
    ; CHECK: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 4
    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY [[C]](s32)
    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[C1]](s32)
    ; CHECK: [[LSHR:%[0-9]+]]:vgpr(s64) = G_LSHR [[COPY]], [[COPY1]](s32)
    ; CHECK: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[LSHR]](s64)
    ; CHECK: [[C2:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0
    ; CHECK: [[UBFX:%[0-9]+]]:vgpr(s32) = G_UBFX [[UV]], [[C2]](s32), [[COPY2]]
    ; CHECK: [[MV:%[0-9]+]]:vgpr(s64) = G_MERGE_VALUES [[UBFX]](s32), [[C2]](s32)
    ; CHECK: $vgpr0_vgpr1 = COPY [[MV]](s64)
    %0:_(s64) = COPY $vgpr0_vgpr1
    %1:_(s32) = G_CONSTANT i32 31
    %2:_(s32) = G_CONSTANT i32 4
    %3:_(s64) = G_UBFX %0, %1(s32), %2
    $vgpr0_vgpr1 = COPY %3(s64)
...
---
# Constant offset (8) and width (40): shift right by the offset, keep the
# shifted low half whole, extract the remaining 8 bits of the field from the
# high half with a 32-bit G_UBFX, then merge (see CHECK lines).
name: test_ubfx_s64_vii_big
legalized: true

body: |
  bb.0.entry:
    liveins: $vgpr0_vgpr1

    ; CHECK-LABEL: name: test_ubfx_s64_vii_big
    ; CHECK: [[COPY:%[0-9]+]]:vgpr(s64) = COPY $vgpr0_vgpr1
    ; CHECK: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 8
    ; CHECK: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 40
    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY [[C]](s32)
    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[C1]](s32)
    ; CHECK: [[LSHR:%[0-9]+]]:vgpr(s64) = G_LSHR [[COPY]], [[COPY1]](s32)
    ; CHECK: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[LSHR]](s64)
    ; CHECK: [[C2:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0
    ; CHECK: [[C3:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 8
    ; CHECK: [[UBFX:%[0-9]+]]:vgpr(s32) = G_UBFX [[UV1]], [[C2]](s32), [[C3]]
    ; CHECK: [[MV:%[0-9]+]]:vgpr(s64) = G_MERGE_VALUES [[UV]](s32), [[UBFX]](s32)
    ; CHECK: $vgpr0_vgpr1 = COPY [[MV]](s64)
    %0:_(s64) = COPY $vgpr0_vgpr1
    %1:_(s32) = G_CONSTANT i32 8
    %2:_(s32) = G_CONSTANT i32 40
    %3:_(s64) = G_UBFX %0, %1(s32), %2
    $vgpr0_vgpr1 = COPY %3(s64)
...
---
# Scalar 64-bit source with vector offset/width: the sgpr source is copied to
# vgpr, then the same shift-based expansion is used (see CHECK lines).
name: test_ubfx_s64_svv
legalized: true

body: |
  bb.0.entry:
    liveins: $sgpr0_sgpr1, $vgpr0, $vgpr1

    ; CHECK-LABEL: name: test_ubfx_s64_svv
    ; CHECK: [[COPY:%[0-9]+]]:sgpr(s64) = COPY $sgpr0_sgpr1
    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s64) = COPY [[COPY]](s64)
    ; CHECK: [[LSHR:%[0-9]+]]:vgpr(s64) = G_LSHR [[COPY3]], [[COPY1]](s32)
    ; CHECK: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[LSHR]](s64)
    ; CHECK: [[C:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 64
    ; CHECK: [[SUB:%[0-9]+]]:vgpr(s32) = G_SUB [[C]], [[COPY2]]
    ; CHECK: [[SHL:%[0-9]+]]:vgpr(s64) = G_SHL [[LSHR]], [[SUB]](s32)
    ; CHECK: [[LSHR1:%[0-9]+]]:vgpr(s64) = G_LSHR [[SHL]], [[SUB]](s32)
    ; CHECK: $vgpr0_vgpr1 = COPY %3:vgpr(s64)
    %0:_(s64) = COPY $sgpr0_sgpr1
    %1:_(s32) = COPY $vgpr0
    %2:_(s32) = COPY $vgpr1
    %3:_(s64) = G_UBFX %0, %1(s32), %2
    $vgpr0_vgpr1 = COPY %3(s64)
...
# Expand to a sequence that combines the offset and width for the two operand
# version of the 32-bit instruction.
---
# Scalar 32-bit source with vector offset/width: the sgpr source is copied to
# vgpr and a single vector G_UBFX is kept (see CHECK lines).
name: test_ubfx_s32_svv
legalized: true

body: |
  bb.0.entry:
    liveins: $sgpr0, $vgpr0, $vgpr1

    ; CHECK-LABEL: name: test_ubfx_s32_svv
    ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY]](s32)
    ; CHECK: [[UBFX:%[0-9]+]]:vgpr(s32) = G_UBFX [[COPY3]], [[COPY1]](s32), [[COPY2]]
    ; CHECK: $vgpr0 = COPY [[UBFX]](s32)
    %0:_(s32) = COPY $sgpr0
    %1:_(s32) = COPY $vgpr0
    %2:_(s32) = COPY $vgpr1
    %3:_(s32) = G_UBFX %0, %1(s32), %2
    $vgpr0 = COPY %3(s32)
...
---
# All-scalar 32-bit case: the offset is masked with 63 and the width shifted
# left by 16, OR'd into the packed operand of S_BFE_U32 (see CHECK lines).
name: test_ubfx_s32_sss
legalized: true

body: |
  bb.0.entry:
    liveins: $sgpr0, $sgpr1, $sgpr2

    ; CHECK-LABEL: name: test_ubfx_s32_sss
    ; CHECK: [[COPY:%[0-9]+]]:sreg_32(s32) = COPY $sgpr0
    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
    ; CHECK: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
    ; CHECK: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 63
    ; CHECK: [[AND:%[0-9]+]]:sgpr(s32) = G_AND [[COPY1]], [[C]]
    ; CHECK: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 16
    ; CHECK: [[SHL:%[0-9]+]]:sgpr(s32) = G_SHL [[COPY2]], [[C1]](s32)
    ; CHECK: [[OR:%[0-9]+]]:sreg_32(s32) = G_OR [[AND]], [[SHL]]
    ; CHECK: [[S_BFE_U32_:%[0-9]+]]:sreg_32(s32) = S_BFE_U32 [[COPY]](s32), [[OR]](s32), implicit-def $scc
    ; CHECK: $sgpr0 = COPY [[S_BFE_U32_]](s32)
    %0:_(s32) = COPY $sgpr0
    %1:_(s32) = COPY $sgpr1
    %2:_(s32) = COPY $sgpr2
    %3:_(s32) = G_UBFX %0, %1(s32), %2
    $sgpr0 = COPY %3(s32)
...
---
# All-scalar 32-bit case with constant offset/width: same mask/shift/OR
# packing into the S_BFE_U32 operand (see CHECK lines).
name: test_ubfx_s32_sii
legalized: true

body: |
  bb.0.entry:
    liveins: $sgpr0, $sgpr1, $sgpr2

    ; CHECK-LABEL: name: test_ubfx_s32_sii
    ; CHECK: [[COPY:%[0-9]+]]:sreg_32(s32) = COPY $sgpr0
    ; CHECK: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 1
    ; CHECK: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 10
    ; CHECK: [[C2:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 63
    ; CHECK: [[AND:%[0-9]+]]:sgpr(s32) = G_AND [[C]], [[C2]]
    ; CHECK: [[C3:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 16
    ; CHECK: [[SHL:%[0-9]+]]:sgpr(s32) = G_SHL [[C1]], [[C3]](s32)
    ; CHECK: [[OR:%[0-9]+]]:sreg_32(s32) = G_OR [[AND]], [[SHL]]
    ; CHECK: [[S_BFE_U32_:%[0-9]+]]:sreg_32(s32) = S_BFE_U32 [[COPY]](s32), [[OR]](s32), implicit-def $scc
    ; CHECK: $sgpr0 = COPY [[S_BFE_U32_]](s32)
    %0:_(s32) = COPY $sgpr0
    %1:_(s32) = G_CONSTANT i32 1
    %2:_(s32) = G_CONSTANT i32 10
    %3:_(s32) = G_UBFX %0, %1(s32), %2
    $sgpr0 = COPY %3(s32)
...
# Expand to a sequence that combines the offset and width for the two operand
# version of the 64-bit scalar instruction.
---
# All-scalar 64-bit case: same packed offset/width operand, selecting
# S_BFE_U64 (see CHECK lines).
name: test_ubfx_s64_sss
legalized: true

body: |
  bb.0.entry:
    liveins: $sgpr0_sgpr1, $sgpr2, $sgpr3

    ; CHECK-LABEL: name: test_ubfx_s64_sss
    ; CHECK: [[COPY:%[0-9]+]]:sreg_64(s64) = COPY $sgpr0_sgpr1
    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
    ; CHECK: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr3
    ; CHECK: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 63
    ; CHECK: [[AND:%[0-9]+]]:sgpr(s32) = G_AND [[COPY1]], [[C]]
    ; CHECK: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 16
    ; CHECK: [[SHL:%[0-9]+]]:sgpr(s32) = G_SHL [[COPY2]], [[C1]](s32)
    ; CHECK: [[OR:%[0-9]+]]:sreg_32(s32) = G_OR [[AND]], [[SHL]]
    ; CHECK: [[S_BFE_U64_:%[0-9]+]]:sreg_64(s64) = S_BFE_U64 [[COPY]](s64), [[OR]](s32), implicit-def $scc
    ; CHECK: $sgpr0_sgpr1 = COPY [[S_BFE_U64_]](s64)
    %0:_(s64) = COPY $sgpr0_sgpr1
    %1:_(s32) = COPY $sgpr2
    %2:_(s32) = COPY $sgpr3
    %3:_(s64) = G_UBFX %0, %1(s32), %2
    $sgpr0_sgpr1 = COPY %3(s64)
...
---
# All-scalar 64-bit case with constant offset/width: packed operand feeding
# S_BFE_U64 (see CHECK lines).
name: test_ubfx_s64_sii
legalized: true

body: |
  bb.0.entry:
    liveins: $sgpr0_sgpr1

    ; CHECK-LABEL: name: test_ubfx_s64_sii
    ; CHECK: [[COPY:%[0-9]+]]:sreg_64(s64) = COPY $sgpr0_sgpr1
    ; CHECK: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 1
    ; CHECK: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 10
    ; CHECK: [[C2:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 63
    ; CHECK: [[AND:%[0-9]+]]:sgpr(s32) = G_AND [[C]], [[C2]]
    ; CHECK: [[C3:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 16
    ; CHECK: [[SHL:%[0-9]+]]:sgpr(s32) = G_SHL [[C1]], [[C3]](s32)
    ; CHECK: [[OR:%[0-9]+]]:sreg_32(s32) = G_OR [[AND]], [[SHL]]
    ; CHECK: [[S_BFE_U64_:%[0-9]+]]:sreg_64(s64) = S_BFE_U64 [[COPY]](s64), [[OR]](s32), implicit-def $scc
    ; CHECK: $sgpr0_sgpr1 = COPY [[S_BFE_U64_]](s64)
    %0:_(s64) = COPY $sgpr0_sgpr1
    %1:_(s32) = G_CONSTANT i32 1
    %2:_(s32) = G_CONSTANT i32 10
    %3:_(s64) = G_UBFX %0, %1(s32), %2
    $sgpr0_sgpr1 = COPY %3(s64)
...