# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -O0 -run-pass=legalizer -global-isel-abort=0 -pass-remarks-missed='gisel.*' -o - %s 2> %t.err | FileCheck -check-prefix=SI %s
# RUN: FileCheck -check-prefix=ERR %s < %t.err
# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -O0 -run-pass=legalizer -global-isel-abort=0 -pass-remarks-missed='gisel.*' %s -o - 2> %t.err | FileCheck -check-prefix=VI %s
# RUN: FileCheck -check-prefix=ERR %s < %t.err
# ERR-NOT: remark
# ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %1:_(<2 x s8>), %0:_(p1) :: (store (<2 x s4>), addrspace 1) (in function: test_truncstore_global_v2s8_to_1_align1)
# ERR-NEXT: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %2:_(<3 x s8>), %0:_(p1) :: (store (<3 x s2>), addrspace 1) (in function: test_truncstore_global_v3s8_to_1_align1)
# ERR-NEXT: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %2:_(<3 x s8>), %0:_(p1) :: (store (<3 x s4>), addrspace 1) (in function: test_truncstore_global_v3s8_to_2_align2)
# ERR-NOT: remark
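# This file checks legalization of G_STORE to the global address space (addrspace 1)
# for scalar, pointer, and vector value types on SI (tahiti) and VI (fiji). The ERR
# lines above capture the truncating vector stores with non-byte-sized memory types
# that the legalizer reports as unable to legalize.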
---
name: test_store_global_i32
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2
; SI-LABEL: name: test_store_global_i32
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2
; SI: G_STORE [[COPY1]](s32), [[COPY]](p1) :: (store (s32), addrspace 1)
; VI-LABEL: name: test_store_global_i32
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2
; VI: G_STORE [[COPY1]](s32), [[COPY]](p1) :: (store (s32), addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(s32) = COPY $vgpr2
G_STORE %1, %0 :: (store (s32), addrspace 1)
...
---
name: test_store_global_i64
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
; SI-LABEL: name: test_store_global_i64
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
; SI: G_STORE [[COPY1]](s64), [[COPY]](p1) :: (store (s64), addrspace 1)
; VI-LABEL: name: test_store_global_i64
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
; VI: G_STORE [[COPY1]](s64), [[COPY]](p1) :: (store (s64), addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(s64) = COPY $vgpr2_vgpr3
G_STORE %1, %0 :: (store (s64), addrspace 1)
...
---
name: test_store_global_p1
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
; SI-LABEL: name: test_store_global_p1
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[COPY1:%[0-9]+]]:_(p1) = COPY $vgpr2_vgpr3
; SI: G_STORE [[COPY1]](p1), [[COPY]](p1) :: (store (p1), addrspace 1)
; VI-LABEL: name: test_store_global_p1
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(p1) = COPY $vgpr2_vgpr3
; VI: G_STORE [[COPY1]](p1), [[COPY]](p1) :: (store (p1), addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(p1) = COPY $vgpr2_vgpr3
G_STORE %1, %0 :: (store (p1), addrspace 1)
...
---
name: test_store_global_p4
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
; SI-LABEL: name: test_store_global_p4
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[COPY1:%[0-9]+]]:_(p4) = COPY $vgpr2_vgpr3
; SI: G_STORE [[COPY1]](p4), [[COPY]](p1) :: (store (p4), addrspace 1)
; VI-LABEL: name: test_store_global_p4
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(p4) = COPY $vgpr2_vgpr3
; VI: G_STORE [[COPY1]](p4), [[COPY]](p1) :: (store (p4), addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(p4) = COPY $vgpr2_vgpr3
G_STORE %1, %0 :: (store (p4), addrspace 1)
...
---
name: test_store_global_p3
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2
; SI-LABEL: name: test_store_global_p3
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[COPY1:%[0-9]+]]:_(p3) = COPY $vgpr2
; SI: G_STORE [[COPY1]](p3), [[COPY]](p1) :: (store (p3), addrspace 1)
; VI-LABEL: name: test_store_global_p3
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(p3) = COPY $vgpr2
; VI: G_STORE [[COPY1]](p3), [[COPY]](p1) :: (store (p3), addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(p3) = COPY $vgpr2
G_STORE %1, %0 :: (store (p3), addrspace 1)
...
---
name: test_store_global_v2s32
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
; SI-LABEL: name: test_store_global_v2s32
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
; SI: G_STORE [[COPY1]](<2 x s32>), [[COPY]](p1) :: (store (<2 x s32>), addrspace 1)
; VI-LABEL: name: test_store_global_v2s32
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
; VI: G_STORE [[COPY1]](<2 x s32>), [[COPY]](p1) :: (store (<2 x s32>), addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<2 x s32>) = COPY $vgpr2_vgpr3
G_STORE %1, %0 :: (store (<2 x s32>), addrspace 1)
...
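# In the next test, SI splits the 96-bit store into a <2 x s32> store plus an s32
# store at offset 8, while VI emits a single <3 x s32> store.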
---
name: test_store_global_v2s16
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2
; SI-LABEL: name: test_store_global_v2s16
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr2
; SI: G_STORE [[COPY1]](<2 x s16>), [[COPY]](p1) :: (store (<2 x s16>), addrspace 1)
; VI-LABEL: name: test_store_global_v2s16
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr2
; VI: G_STORE [[COPY1]](<2 x s16>), [[COPY]](p1) :: (store (<2 x s16>), addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<2 x s16>) = COPY $vgpr2
G_STORE %1, %0 :: (store (<2 x s16>), addrspace 1)
...
---
name: test_store_global_v3s32
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4
; SI-LABEL: name: test_store_global_v3s32
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
; SI: [[EXTRACT:%[0-9]+]]:_(<2 x s32>) = G_EXTRACT [[COPY1]](<3 x s32>), 0
; SI: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY1]](<3 x s32>), 64
; SI: G_STORE [[EXTRACT]](<2 x s32>), [[COPY]](p1) :: (store (<2 x s32>), align 4, addrspace 1)
; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
; SI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 8, addrspace 1)
; VI-LABEL: name: test_store_global_v3s32
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
; VI: G_STORE [[COPY1]](<3 x s32>), [[COPY]](p1) :: (store (<3 x s32>), align 4, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
G_STORE %1, %0 :: (store (<3 x s32>), align 4, addrspace 1)
...
---
name: test_truncstore_global_s64_to_s8
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
; SI-LABEL: name: test_truncstore_global_s64_to_s8
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
; SI: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
; SI: G_STORE [[TRUNC]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; VI-LABEL: name: test_truncstore_global_s64_to_s8
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
; VI: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
; VI: G_STORE [[TRUNC]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(s64) = COPY $vgpr2_vgpr3
G_STORE %1, %0 :: (store (s8), addrspace 1)
...
---
name: test_truncstore_global_s64_to_s16
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
; SI-LABEL: name: test_truncstore_global_s64_to_s16
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
; SI: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
; SI: G_STORE [[TRUNC]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; VI-LABEL: name: test_truncstore_global_s64_to_s16
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
; VI: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
; VI: G_STORE [[TRUNC]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(s64) = COPY $vgpr2_vgpr3
G_STORE %1, %0 :: (store (s16), addrspace 1)
...
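# The under-aligned truncating stores below are split into naturally aligned pieces:
# SI derives the upper parts with 32-bit shifts, while VI shifts in s16 and uses
# G_ANYEXT before each narrow store.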
---
name: test_truncstore_global_s64_to_s16_align1
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
; SI-LABEL: name: test_truncstore_global_s64_to_s16_align1
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](s64)
; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[UV]], [[C1]]
; SI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[C]](s32)
; SI: G_STORE [[UV]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
; SI: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; VI-LABEL: name: test_truncstore_global_s64_to_s16_align1
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](s64)
; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[UV]](s32)
; VI: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
; VI: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC]], [[C]](s16)
; VI: G_STORE [[UV]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
; VI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR]](s16)
; VI: G_STORE [[ANYEXT]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(s64) = COPY $vgpr2_vgpr3
G_STORE %1, %0 :: (store (s16), addrspace 1, align 1)
...
---
name: test_truncstore_global_s64_to_s32
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
; SI-LABEL: name: test_truncstore_global_s64_to_s32
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
; SI: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
; SI: G_STORE [[TRUNC]](s32), [[COPY]](p1) :: (store (s32), addrspace 1)
; VI-LABEL: name: test_truncstore_global_s64_to_s32
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
; VI: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
; VI: G_STORE [[TRUNC]](s32), [[COPY]](p1) :: (store (s32), addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(s64) = COPY $vgpr2_vgpr3
G_STORE %1, %0 :: (store (s32), addrspace 1)
...
---
name: test_truncstore_global_s64_to_s32_align2
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
; SI-LABEL: name: test_truncstore_global_s64_to_s32_align2
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](s64)
; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[UV]], [[C]](s32)
; SI: G_STORE [[UV]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
; SI: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 2, addrspace 1)
; VI-LABEL: name: test_truncstore_global_s64_to_s32_align2
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](s64)
; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[UV]], [[C]](s32)
; VI: G_STORE [[UV]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
; VI: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 2, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(s64) = COPY $vgpr2_vgpr3
G_STORE %1, %0 :: (store (s32), addrspace 1, align 2)
...
---
name: test_truncstore_global_s64_to_s32_align1
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
; SI-LABEL: name: test_truncstore_global_s64_to_s32_align1
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](s64)
; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[UV]], [[C]](s32)
; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[UV]], [[C2]]
; SI: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[COPY2]](s32)
; SI: [[AND1:%[0-9]+]]:_(s32) = G_AND [[LSHR]], [[C2]]
; SI: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[AND1]], [[C1]](s32)
; SI: G_STORE [[UV]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
; SI: G_STORE [[LSHR1]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
; SI: G_STORE [[LSHR]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
; SI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 3
; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
; SI: G_STORE [[LSHR2]](s32), [[PTR_ADD2]](p1) :: (store (s8) into unknown-address + 3, addrspace 1)
; VI-LABEL: name: test_truncstore_global_s64_to_s32_align1
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](s64)
; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[UV]](s32)
; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[UV]], [[C]](s32)
; VI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
; VI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
; VI: [[LSHR1:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC]], [[C1]](s16)
; VI: [[LSHR2:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC1]], [[C1]](s16)
; VI: G_STORE [[UV]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
; VI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR1]](s16)
; VI: G_STORE [[ANYEXT]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
; VI: G_STORE [[LSHR]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 3
; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
; VI: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR2]](s16)
; VI: G_STORE [[ANYEXT1]](s32), [[PTR_ADD2]](p1) :: (store (s8) into unknown-address + 3, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(s64) = COPY $vgpr2_vgpr3
G_STORE %1, %0 :: (store (s32), addrspace 1, align 1)
...
---
name: test_truncstore_global_s128_to_s16
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5
; SI-LABEL: name: test_truncstore_global_s128_to_s16
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[COPY1:%[0-9]+]]:_(s128) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
; SI: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s128)
; SI: G_STORE [[TRUNC]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; VI-LABEL: name: test_truncstore_global_s128_to_s16
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(s128) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
; VI: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s128)
; VI: G_STORE [[TRUNC]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(s128) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
G_STORE %1, %0 :: (store (s16), addrspace 1)
...
---
name: test_truncstore_global_s128_to_s8
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5
; SI-LABEL: name: test_truncstore_global_s128_to_s8
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[COPY1:%[0-9]+]]:_(s128) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
; SI: [[BITCAST:%[0-9]+]]:_(<4 x s32>) = G_BITCAST [[COPY1]](s128)
; SI: G_STORE [[BITCAST]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), addrspace 1)
; VI-LABEL: name: test_truncstore_global_s128_to_s8
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(s128) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
; VI: [[BITCAST:%[0-9]+]]:_(<4 x s32>) = G_BITCAST [[COPY1]](s128)
; VI: G_STORE [[BITCAST]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(s128) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
G_STORE %1, %0 :: (store (s128), addrspace 1)
...
---
name: test_store_global_i1
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2
; SI-LABEL: name: test_store_global_i1
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2
; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]]
; SI: [[AND1:%[0-9]+]]:_(s32) = G_AND [[AND]], [[C]]
; SI: G_STORE [[AND1]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; VI-LABEL: name: test_store_global_i1
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2
; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]]
; VI: [[AND1:%[0-9]+]]:_(s32) = G_AND [[AND]], [[C]]
; VI: G_STORE [[AND1]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(s32) = COPY $vgpr2
%2:_(s1) = G_TRUNC %1
G_STORE %2, %0 :: (store (s1), addrspace 1)
...
---
name: test_store_global_i8
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2
; SI-LABEL: name: test_store_global_i8
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2
; SI: G_STORE [[COPY1]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; VI-LABEL: name: test_store_global_i8
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2
; VI: G_STORE [[COPY1]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(s32) = COPY $vgpr2
%2:_(s8) = G_TRUNC %1
G_STORE %2, %0 :: (store (s8), addrspace 1)
...
---
name: test_store_global_i16
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2
; SI-LABEL: name: test_store_global_i16
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2
; SI: G_STORE [[COPY1]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; VI-LABEL: name: test_store_global_i16
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2
; VI: G_STORE [[COPY1]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(s32) = COPY $vgpr2
%2:_(s16) = G_TRUNC %1
G_STORE %2, %0 :: (store (s16), addrspace 1)
...
---
name: test_store_global_96
body: |
bb.0:
liveins: $vgpr0_vgpr1_vgpr2, $vgpr3_vgpr4
; SI-LABEL: name: test_store_global_96
; SI: [[COPY:%[0-9]+]]:_(s96) = COPY $vgpr0_vgpr1_vgpr2
; SI: [[COPY1:%[0-9]+]]:_(p1) = COPY $vgpr3_vgpr4
; SI: [[BITCAST:%[0-9]+]]:_(<3 x s32>) = G_BITCAST [[COPY]](s96)
; SI: [[EXTRACT:%[0-9]+]]:_(<2 x s32>) = G_EXTRACT [[BITCAST]](<3 x s32>), 0
; SI: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[BITCAST]](<3 x s32>), 64
; SI: G_STORE [[EXTRACT]](<2 x s32>), [[COPY1]](p1) :: (store (<2 x s32>), align 16, addrspace 1)
; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C]](s64)
; SI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 8, align 8, addrspace 1)
; VI-LABEL: name: test_store_global_96
; VI: [[COPY:%[0-9]+]]:_(s96) = COPY $vgpr0_vgpr1_vgpr2
; VI: [[COPY1:%[0-9]+]]:_(p1) = COPY $vgpr3_vgpr4
; VI: [[BITCAST:%[0-9]+]]:_(<3 x s32>) = G_BITCAST [[COPY]](s96)
; VI: G_STORE [[BITCAST]](<3 x s32>), [[COPY1]](p1) :: (store (<3 x s32>), align 16, addrspace 1)
%0:_(s96) = COPY $vgpr0_vgpr1_vgpr2
%1:_(p1) = COPY $vgpr3_vgpr4
G_STORE %0, %1 :: (store (s96), addrspace 1, align 16)
...
---
name: test_store_global_i128
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5
; SI-LABEL: name: test_store_global_i128
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[COPY1:%[0-9]+]]:_(s128) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
; SI: [[BITCAST:%[0-9]+]]:_(<4 x s32>) = G_BITCAST [[COPY1]](s128)
; SI: G_STORE [[BITCAST]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), addrspace 1)
; VI-LABEL: name: test_store_global_i128
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(s128) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
; VI: [[BITCAST:%[0-9]+]]:_(<4 x s32>) = G_BITCAST [[COPY1]](s128)
; VI: G_STORE [[BITCAST]](<4 x s32>), [[COPY]](p1) :: (store (<4 x s32>), addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(s128) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
G_STORE %1, %0 :: (store (s128), addrspace 1)
...
---
name: test_store_global_v2s64
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5
; SI-LABEL: name: test_store_global_v2s64
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
; SI: G_STORE [[COPY1]](<2 x s64>), [[COPY]](p1) :: (store (<2 x s64>), addrspace 1)
; VI-LABEL: name: test_store_global_v2s64
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
; VI: G_STORE [[COPY1]](<2 x s64>), [[COPY]](p1) :: (store (<2 x s64>), addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<2 x s64>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
G_STORE %1, %0 :: (store (<2 x s64>), addrspace 1)
...
---
name: test_store_global_v2s8_align1
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5
; SI-LABEL: name: test_store_global_v2s8_align1
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[DEF:%[0-9]+]]:_(<2 x s32>) = G_IMPLICIT_DEF
; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[DEF]](<2 x s32>)
; SI: G_STORE [[UV]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
; SI: G_STORE [[UV1]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; VI-LABEL: name: test_store_global_v2s8_align1
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[DEF:%[0-9]+]]:_(<2 x s32>) = G_IMPLICIT_DEF
; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[DEF]](<2 x s32>)
; VI: G_STORE [[UV]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
; VI: G_STORE [[UV1]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<2 x s8>) = G_IMPLICIT_DEF
G_STORE %1, %0 :: (store (<2 x s8>), addrspace 1, align 1)
...
---
name: test_store_global_v2s8_align2
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5
; SI-LABEL: name: test_store_global_v2s8_align2
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[DEF:%[0-9]+]]:_(<2 x s32>) = G_IMPLICIT_DEF
; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[DEF]](<2 x s32>)
; SI: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
; SI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[UV]](s32)
; SI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C]]
; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; SI: [[AND1:%[0-9]+]]:_(s32) = G_AND [[UV1]], [[C2]]
; SI: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C1]](s32)
; SI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[SHL]](s32)
; SI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[TRUNC1]]
; SI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[OR]](s16)
; SI: G_STORE [[ANYEXT]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; VI-LABEL: name: test_store_global_v2s8_align2
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[DEF:%[0-9]+]]:_(<2 x s32>) = G_IMPLICIT_DEF
; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[DEF]](<2 x s32>)
; VI: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[UV]](s32)
; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C]]
; VI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[UV1]](s32)
; VI: [[AND1:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C]]
; VI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
; VI: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[AND1]], [[C1]](s16)
; VI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL]]
; VI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[OR]](s16)
; VI: G_STORE [[ANYEXT]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<2 x s8>) = G_IMPLICIT_DEF
G_STORE %1, %0 :: (store (<2 x s8>), addrspace 1, align 2)
...
---
name: test_store_global_v2s8_align4
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5
; SI-LABEL: name: test_store_global_v2s8_align4
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[DEF:%[0-9]+]]:_(<2 x s32>) = G_IMPLICIT_DEF
; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[DEF]](<2 x s32>)
; SI: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
; SI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[UV]](s32)
; SI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C]]
; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; SI: [[AND1:%[0-9]+]]:_(s32) = G_AND [[UV1]], [[C2]]
; SI: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C1]](s32)
; SI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[SHL]](s32)
; SI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[TRUNC1]]
; SI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[OR]](s16)
; SI: G_STORE [[ANYEXT]](s32), [[COPY]](p1) :: (store (s16), align 4, addrspace 1)
; VI-LABEL: name: test_store_global_v2s8_align4
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[DEF:%[0-9]+]]:_(<2 x s32>) = G_IMPLICIT_DEF
; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[DEF]](<2 x s32>)
; VI: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[UV]](s32)
; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C]]
; VI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[UV1]](s32)
; VI: [[AND1:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C]]
; VI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
; VI: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[AND1]], [[C1]](s16)
; VI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL]]
; VI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[OR]](s16)
; VI: G_STORE [[ANYEXT]](s32), [[COPY]](p1) :: (store (s16), align 4, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<2 x s8>) = G_IMPLICIT_DEF
G_STORE %1, %0 :: (store (<2 x s8>), addrspace 1, align 4)
...
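# The <3 x s8> stores below pack the first two elements into an s16 and store the
# third element as a byte at offset 2; the align-1 case further splits that s16
# into two byte stores.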
---
name: test_store_global_v3s8_align1
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4
; SI-LABEL: name: test_store_global_v3s8_align1
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>)
; SI: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
; SI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[UV]](s32)
; SI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C]]
; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; SI: [[AND1:%[0-9]+]]:_(s32) = G_AND [[UV1]], [[C2]]
; SI: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[COPY2]](s32)
; SI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[SHL]](s32)
; SI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[TRUNC1]]
; SI: [[COPY3:%[0-9]+]]:_(s16) = COPY [[OR]](s16)
; SI: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[COPY3]](s16)
; SI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[ZEXT]], [[C1]](s32)
; SI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[COPY3]](s16)
; SI: G_STORE [[ANYEXT]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
; SI: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
; SI: G_STORE [[UV2]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
; VI-LABEL: name: test_store_global_v3s8_align1
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>)
; VI: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[UV]](s32)
; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C]]
; VI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[UV1]](s32)
; VI: [[AND1:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C]]
; VI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
; VI: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[AND1]], [[C1]](s16)
; VI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL]]
; VI: [[COPY2:%[0-9]+]]:_(s16) = COPY [[OR]](s16)
; VI: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[COPY2]], [[C1]](s16)
; VI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[COPY2]](s16)
; VI: G_STORE [[ANYEXT]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
; VI: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR]](s16)
; VI: G_STORE [[ANYEXT1]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
; VI: G_STORE [[UV2]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
%2:_(<3 x s8>) = G_TRUNC %1
G_STORE %2, %0 :: (store (<3 x s8>), addrspace 1, align 1)
...
---
name: test_store_global_v3s8_align2
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4
; SI-LABEL: name: test_store_global_v3s8_align2
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>)
; SI: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
; SI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[UV]](s32)
; SI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C]]
; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; SI: [[AND1:%[0-9]+]]:_(s32) = G_AND [[UV1]], [[C2]]
; SI: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C1]](s32)
; SI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[SHL]](s32)
; SI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[TRUNC1]]
; SI: [[COPY2:%[0-9]+]]:_(s16) = COPY [[OR]](s16)
; SI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[COPY2]](s16)
; SI: G_STORE [[ANYEXT]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
; SI: G_STORE [[UV2]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, align 2, addrspace 1)
; VI-LABEL: name: test_store_global_v3s8_align2
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>)
; VI: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[UV]](s32)
; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C]]
; VI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[UV1]](s32)
; VI: [[AND1:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C]]
; VI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
; VI: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[AND1]], [[C1]](s16)
; VI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL]]
; VI: [[COPY2:%[0-9]+]]:_(s16) = COPY [[OR]](s16)
; VI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[COPY2]](s16)
; VI: G_STORE [[ANYEXT]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
; VI: G_STORE [[UV2]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, align 2, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
%2:_(<3 x s8>) = G_TRUNC %1
G_STORE %2, %0 :: (store (<3 x s8>), addrspace 1, align 2)
...
---
name: test_store_global_v3s8_align4
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4
; SI-LABEL: name: test_store_global_v3s8_align4
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>)
; SI: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; SI: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
; SI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[UV]](s32)
; SI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C]]
; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; SI: [[AND1:%[0-9]+]]:_(s32) = G_AND [[UV1]], [[C2]]
; SI: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[COPY2]](s32)
; SI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[SHL]](s32)
; SI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[TRUNC1]]
; SI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[UV2]](s32)
; SI: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C]]
; SI: [[AND3:%[0-9]+]]:_(s32) = G_AND [[DEF]], [[C2]]
; SI: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[AND3]], [[C1]](s32)
; SI: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[SHL1]](s32)
; SI: [[OR1:%[0-9]+]]:_(s16) = G_OR [[AND2]], [[TRUNC3]]
; SI: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[OR]](s16)
; SI: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[OR1]](s16)
; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[ZEXT1]], [[C3]](s32)
; SI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[ZEXT]], [[SHL2]]
; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[OR2]](s32)
; SI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C3]](s32)
; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
; SI: G_STORE [[COPY3]](s32), [[COPY]](p1) :: (store (s16), align 4, addrspace 1)
; SI: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, align 2, addrspace 1)
; VI-LABEL: name: test_store_global_v3s8_align4
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>)
; VI: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; VI: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[UV]](s32)
; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C]]
; VI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[UV1]](s32)
; VI: [[AND1:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C]]
; VI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
; VI: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[AND1]], [[C1]](s16)
; VI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL]]
; VI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[UV2]](s32)
; VI: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C]]
; VI: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[DEF]](s32)
; VI: [[AND3:%[0-9]+]]:_(s16) = G_AND [[TRUNC3]], [[C]]
; VI: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[AND3]], [[C1]](s16)
; VI: [[OR1:%[0-9]+]]:_(s16) = G_OR [[AND2]], [[SHL1]]
; VI: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[OR]](s16)
; VI: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[OR1]](s16)
; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[ZEXT1]], [[C2]](s32)
; VI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[ZEXT]], [[SHL2]]
; VI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[OR2]](s32)
; VI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C2]](s32)
; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
; VI: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store (s16), align 4, addrspace 1)
; VI: G_STORE [[LSHR]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, align 2, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
%2:_(<3 x s8>) = G_TRUNC %1
G_STORE %2, %0 :: (store (<3 x s8>), addrspace 1, align 4)
...
---
name: test_store_global_v4s8_align1
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5
; SI-LABEL: name: test_store_global_v4s8_align1
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<4 x s32>)
; SI: G_STORE [[UV]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
; SI: G_STORE [[UV1]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
; SI: G_STORE [[UV2]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3
; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
; SI: G_STORE [[UV3]](s32), [[PTR_ADD2]](p1) :: (store (s8) into unknown-address + 3, addrspace 1)
; VI-LABEL: name: test_store_global_v4s8_align1
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<4 x s32>)
; VI: G_STORE [[UV]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
; VI: G_STORE [[UV1]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
; VI: G_STORE [[UV2]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3
; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
; VI: G_STORE [[UV3]](s32), [[PTR_ADD2]](p1) :: (store (s8) into unknown-address + 3, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<4 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
%2:_(<4 x s8>) = G_TRUNC %1
G_STORE %2, %0 :: (store (<4 x s8>), addrspace 1, align 1)
...
---
name: test_store_global_v4s8_align2
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5
; SI-LABEL: name: test_store_global_v4s8_align2
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<4 x s32>)
; SI: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
; SI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[UV]](s32)
; SI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C]]
; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; SI: [[AND1:%[0-9]+]]:_(s32) = G_AND [[UV1]], [[C2]]
; SI: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[COPY2]](s32)
; SI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[SHL]](s32)
; SI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[TRUNC1]]
; SI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[UV2]](s32)
; SI: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C]]
; SI: [[AND3:%[0-9]+]]:_(s32) = G_AND [[UV3]], [[C2]]
; SI: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[AND3]], [[C1]](s32)
; SI: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[SHL1]](s32)
; SI: [[OR1:%[0-9]+]]:_(s16) = G_OR [[AND2]], [[TRUNC3]]
; SI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[OR]](s16)
; SI: G_STORE [[ANYEXT]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
; SI: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[OR1]](s16)
; SI: G_STORE [[ANYEXT1]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 2, addrspace 1)
; VI-LABEL: name: test_store_global_v4s8_align2
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<4 x s32>)
; VI: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[UV]](s32)
; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C]]
; VI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[UV1]](s32)
; VI: [[AND1:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C]]
; VI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
; VI: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[AND1]], [[C1]](s16)
; VI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL]]
; VI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[UV2]](s32)
; VI: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C]]
; VI: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[UV3]](s32)
; VI: [[AND3:%[0-9]+]]:_(s16) = G_AND [[TRUNC3]], [[C]]
; VI: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[AND3]], [[C1]](s16)
; VI: [[OR1:%[0-9]+]]:_(s16) = G_OR [[AND2]], [[SHL1]]
; VI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[OR]](s16)
; VI: G_STORE [[ANYEXT]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
; VI: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[OR1]](s16)
; VI: G_STORE [[ANYEXT1]](s32), [[PTR_ADD]](p1) :: (store (s16) into unknown-address + 2, addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<4 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
%2:_(<4 x s8>) = G_TRUNC %1
G_STORE %2, %0 :: (store (<4 x s8>), addrspace 1, align 2)
...
---
name: test_store_global_v4s8_align4
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5
; SI-LABEL: name: test_store_global_v4s8_align4
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<4 x s32>)
; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[UV]], [[C]]
; SI: [[AND1:%[0-9]+]]:_(s32) = G_AND [[UV1]], [[C]]
; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C1]](s32)
; SI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]]
; SI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[UV2]], [[C]]
; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; SI: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[AND2]], [[C2]](s32)
; SI: [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
; SI: [[AND3:%[0-9]+]]:_(s32) = G_AND [[UV3]], [[C]]
; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
; SI: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[AND3]], [[C3]](s32)
; SI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]]
; SI: G_STORE [[OR2]](s32), [[COPY]](p1) :: (store (s32), addrspace 1)
; VI-LABEL: name: test_store_global_v4s8_align4
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<4 x s32>)
; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[UV]], [[C]]
; VI: [[AND1:%[0-9]+]]:_(s32) = G_AND [[UV1]], [[C]]
; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C1]](s32)
; VI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]]
; VI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[UV2]], [[C]]
; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[AND2]], [[C2]](s32)
; VI: [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
; VI: [[AND3:%[0-9]+]]:_(s32) = G_AND [[UV3]], [[C]]
; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
; VI: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[AND3]], [[C3]](s32)
; VI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]]
; VI: G_STORE [[OR2]](s32), [[COPY]](p1) :: (store (s32), addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<4 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
%2:_(<4 x s8>) = G_TRUNC %1
G_STORE %2, %0 :: (store (<4 x s8>), addrspace 1, align 4)
...
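# The remaining tests store vectors with non-byte-sized element memory types
# (<2 x s4>, <3 x s2>, <3 x s4>); these are left unlegalized and produce the
# remarks checked by the ERR prefixes at the top of the file.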
---
name: test_truncstore_global_v2s8_to_1_align1
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5
; SI-LABEL: name: test_truncstore_global_v2s8_to_1_align1
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[DEF:%[0-9]+]]:_(<2 x s8>) = G_IMPLICIT_DEF
; SI: G_STORE [[DEF]](<2 x s8>), [[COPY]](p1) :: (store (<2 x s4>), addrspace 1)
; VI-LABEL: name: test_truncstore_global_v2s8_to_1_align1
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[DEF:%[0-9]+]]:_(<2 x s8>) = G_IMPLICIT_DEF
; VI: G_STORE [[DEF]](<2 x s8>), [[COPY]](p1) :: (store (<2 x s4>), addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<2 x s8>) = G_IMPLICIT_DEF
G_STORE %1, %0 :: (store (<2 x s4>), addrspace 1, align 1)
...
---
name: test_truncstore_global_v3s8_to_1_align1
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4
; SI-LABEL: name: test_truncstore_global_v3s8_to_1_align1
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
; SI: [[TRUNC:%[0-9]+]]:_(<3 x s8>) = G_TRUNC [[COPY1]](<3 x s32>)
; SI: G_STORE [[TRUNC]](<3 x s8>), [[COPY]](p1) :: (store (<3 x s2>), addrspace 1)
; VI-LABEL: name: test_truncstore_global_v3s8_to_1_align1
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
; VI: [[TRUNC:%[0-9]+]]:_(<3 x s8>) = G_TRUNC [[COPY1]](<3 x s32>)
; VI: G_STORE [[TRUNC]](<3 x s8>), [[COPY]](p1) :: (store (<3 x s2>), addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
%2:_(<3 x s8>) = G_TRUNC %1
G_STORE %2, %0 :: (store (<3 x s2>), addrspace 1, align 1)
...
---
name: test_truncstore_global_v3s8_to_2_align2
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4
; SI-LABEL: name: test_truncstore_global_v3s8_to_2_align2
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; SI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
; SI: [[TRUNC:%[0-9]+]]:_(<3 x s8>) = G_TRUNC [[COPY1]](<3 x s32>)
; SI: G_STORE [[TRUNC]](<3 x s8>), [[COPY]](p1) :: (store (<3 x s4>), addrspace 1)
; VI-LABEL: name: test_truncstore_global_v3s8_to_2_align2
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
; VI: [[TRUNC:%[0-9]+]]:_(<3 x s8>) = G_TRUNC [[COPY1]](<3 x s32>)
; VI: G_STORE [[TRUNC]](<3 x s8>), [[COPY]](p1) :: (store (<3 x s4>), addrspace 1)
%0:_(p1) = COPY $vgpr0_vgpr1
%1:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
%2:_(<3 x s8>) = G_TRUNC %1
G_STORE %2, %0 :: (store (<3 x s4>), addrspace 1, align 2)
...