# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -O0 -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=GFX8
# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx906 -O0 -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=GFX9
---
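# s32 G_UMULH is legal on both subtargets and is expected to pass through unchanged.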
name: test_umulh_s32
body: |
bb.0:
liveins: $vgpr0, $vgpr1
; GFX8-LABEL: name: test_umulh_s32
; GFX8: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX8: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX8: [[UMULH:%[0-9]+]]:_(s32) = G_UMULH [[COPY]], [[COPY1]]
; GFX8: $vgpr0 = COPY [[UMULH]](s32)
; GFX9-LABEL: name: test_umulh_s32
; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9: [[UMULH:%[0-9]+]]:_(s32) = G_UMULH [[COPY]], [[COPY1]]
; GFX9: $vgpr0 = COPY [[UMULH]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s32) = G_UMULH %0, %1
$vgpr0 = COPY %2
...
---
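# <2 x s32> is expected to be scalarized into two s32 G_UMULH and rebuilt with G_BUILD_VECTOR.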
name: test_umulh_v2s32
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
; GFX8-LABEL: name: test_umulh_v2s32
; GFX8: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
; GFX8: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
; GFX8: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
; GFX8: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
; GFX8: [[UMULH:%[0-9]+]]:_(s32) = G_UMULH [[UV]], [[UV2]]
; GFX8: [[UMULH1:%[0-9]+]]:_(s32) = G_UMULH [[UV1]], [[UV3]]
; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[UMULH]](s32), [[UMULH1]](s32)
; GFX8: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
; GFX9-LABEL: name: test_umulh_v2s32
; GFX9: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
; GFX9: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
; GFX9: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
; GFX9: [[UMULH:%[0-9]+]]:_(s32) = G_UMULH [[UV]], [[UV2]]
; GFX9: [[UMULH1:%[0-9]+]]:_(s32) = G_UMULH [[UV1]], [[UV3]]
; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[UMULH]](s32), [[UMULH1]](s32)
; GFX9: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
%0:_(<2 x s32>) = COPY $vgpr0_vgpr1
%1:_(<2 x s32>) = COPY $vgpr2_vgpr3
%2:_(<2 x s32>) = G_UMULH %0, %1
$vgpr0_vgpr1 = COPY %2
...
---
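# s64 is expected to be expanded into 32-bit partial products (G_MUL/G_UMULH), with the carries
# propagated through G_UADDO/G_ZEXT/G_ADD and the halves recombined with G_MERGE_VALUES.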
name: test_umulh_s64
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
; GFX8-LABEL: name: test_umulh_s64
; GFX8: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
; GFX8: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
; GFX8: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
; GFX8: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](s64)
; GFX8: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[UV1]], [[UV2]]
; GFX8: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UV]], [[UV3]]
; GFX8: [[UMULH:%[0-9]+]]:_(s32) = G_UMULH [[UV]], [[UV2]]
; GFX8: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[MUL]], [[MUL1]]
; GFX8: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO1]](s1)
; GFX8: [[UADDO2:%[0-9]+]]:_(s32), [[UADDO3:%[0-9]+]]:_(s1) = G_UADDO [[UADDO]], [[UMULH]]
; GFX8: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO3]](s1)
; GFX8: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[ZEXT]], [[ZEXT1]]
; GFX8: [[MUL2:%[0-9]+]]:_(s32) = G_MUL [[UV1]], [[UV3]]
; GFX8: [[UMULH1:%[0-9]+]]:_(s32) = G_UMULH [[UV1]], [[UV2]]
; GFX8: [[UMULH2:%[0-9]+]]:_(s32) = G_UMULH [[UV]], [[UV3]]
; GFX8: [[UADDO4:%[0-9]+]]:_(s32), [[UADDO5:%[0-9]+]]:_(s1) = G_UADDO [[MUL2]], [[UMULH1]]
; GFX8: [[ZEXT2:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO5]](s1)
; GFX8: [[UADDO6:%[0-9]+]]:_(s32), [[UADDO7:%[0-9]+]]:_(s1) = G_UADDO [[UADDO4]], [[UMULH2]]
; GFX8: [[ZEXT3:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO7]](s1)
; GFX8: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ZEXT2]], [[ZEXT3]]
; GFX8: [[UADDO8:%[0-9]+]]:_(s32), [[UADDO9:%[0-9]+]]:_(s1) = G_UADDO [[UADDO6]], [[ADD]]
; GFX8: [[ZEXT4:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO9]](s1)
; GFX8: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[ZEXT4]]
; GFX8: [[UMULH3:%[0-9]+]]:_(s32) = G_UMULH [[UV1]], [[UV3]]
; GFX8: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[UMULH3]], [[ADD2]]
; GFX8: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO8]](s32), [[ADD3]](s32)
; GFX8: $vgpr0_vgpr1 = COPY [[MV]](s64)
; GFX9-LABEL: name: test_umulh_s64
; GFX9: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
; GFX9: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
; GFX9: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](s64)
; GFX9: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[UV1]], [[UV2]]
; GFX9: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UV]], [[UV3]]
; GFX9: [[UMULH:%[0-9]+]]:_(s32) = G_UMULH [[UV]], [[UV2]]
; GFX9: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[MUL]], [[MUL1]]
; GFX9: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO1]](s1)
; GFX9: [[UADDO2:%[0-9]+]]:_(s32), [[UADDO3:%[0-9]+]]:_(s1) = G_UADDO [[UADDO]], [[UMULH]]
; GFX9: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO3]](s1)
; GFX9: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[ZEXT]], [[ZEXT1]]
; GFX9: [[MUL2:%[0-9]+]]:_(s32) = G_MUL [[UV1]], [[UV3]]
; GFX9: [[UMULH1:%[0-9]+]]:_(s32) = G_UMULH [[UV1]], [[UV2]]
; GFX9: [[UMULH2:%[0-9]+]]:_(s32) = G_UMULH [[UV]], [[UV3]]
; GFX9: [[UADDO4:%[0-9]+]]:_(s32), [[UADDO5:%[0-9]+]]:_(s1) = G_UADDO [[MUL2]], [[UMULH1]]
; GFX9: [[ZEXT2:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO5]](s1)
; GFX9: [[UADDO6:%[0-9]+]]:_(s32), [[UADDO7:%[0-9]+]]:_(s1) = G_UADDO [[UADDO4]], [[UMULH2]]
; GFX9: [[ZEXT3:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO7]](s1)
; GFX9: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ZEXT2]], [[ZEXT3]]
; GFX9: [[UADDO8:%[0-9]+]]:_(s32), [[UADDO9:%[0-9]+]]:_(s1) = G_UADDO [[UADDO6]], [[ADD]]
; GFX9: [[ZEXT4:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO9]](s1)
; GFX9: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[ZEXT4]]
; GFX9: [[UMULH3:%[0-9]+]]:_(s32) = G_UMULH [[UV1]], [[UV3]]
; GFX9: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[UMULH3]], [[ADD2]]
; GFX9: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO8]](s32), [[ADD3]](s32)
; GFX9: $vgpr0_vgpr1 = COPY [[MV]](s64)
%0:_(s64) = COPY $vgpr0_vgpr1
%1:_(s64) = COPY $vgpr2_vgpr3
%2:_(s64) = G_UMULH %0, %1
$vgpr0_vgpr1 = COPY %2
...
---
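# <2 x s64> is expected to be split into s64 elements, each expanded as in the s64 case above,
# then repacked with G_BUILD_VECTOR.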
name: test_umulh_v2s64
body: |
bb.0:
liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4_vgpr5_vgpr6_vgpr7
; GFX8-LABEL: name: test_umulh_v2s64
; GFX8: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
; GFX8: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
; GFX8: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
; GFX8: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY1]](<2 x s64>)
; GFX8: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV]](s64)
; GFX8: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV2]](s64)
; GFX8: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[UV5]], [[UV6]]
; GFX8: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UV4]], [[UV7]]
; GFX8: [[UMULH:%[0-9]+]]:_(s32) = G_UMULH [[UV4]], [[UV6]]
; GFX8: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[MUL]], [[MUL1]]
; GFX8: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO1]](s1)
; GFX8: [[UADDO2:%[0-9]+]]:_(s32), [[UADDO3:%[0-9]+]]:_(s1) = G_UADDO [[UADDO]], [[UMULH]]
; GFX8: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO3]](s1)
; GFX8: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[ZEXT]], [[ZEXT1]]
; GFX8: [[MUL2:%[0-9]+]]:_(s32) = G_MUL [[UV5]], [[UV7]]
; GFX8: [[UMULH1:%[0-9]+]]:_(s32) = G_UMULH [[UV5]], [[UV6]]
; GFX8: [[UMULH2:%[0-9]+]]:_(s32) = G_UMULH [[UV4]], [[UV7]]
; GFX8: [[UADDO4:%[0-9]+]]:_(s32), [[UADDO5:%[0-9]+]]:_(s1) = G_UADDO [[MUL2]], [[UMULH1]]
; GFX8: [[ZEXT2:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO5]](s1)
; GFX8: [[UADDO6:%[0-9]+]]:_(s32), [[UADDO7:%[0-9]+]]:_(s1) = G_UADDO [[UADDO4]], [[UMULH2]]
; GFX8: [[ZEXT3:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO7]](s1)
; GFX8: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ZEXT2]], [[ZEXT3]]
; GFX8: [[UADDO8:%[0-9]+]]:_(s32), [[UADDO9:%[0-9]+]]:_(s1) = G_UADDO [[UADDO6]], [[ADD]]
; GFX8: [[ZEXT4:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO9]](s1)
; GFX8: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[ZEXT4]]
; GFX8: [[UMULH3:%[0-9]+]]:_(s32) = G_UMULH [[UV5]], [[UV7]]
; GFX8: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[UMULH3]], [[ADD2]]
; GFX8: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO8]](s32), [[ADD3]](s32)
; GFX8: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64)
; GFX8: [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV3]](s64)
; GFX8: [[MUL3:%[0-9]+]]:_(s32) = G_MUL [[UV9]], [[UV10]]
; GFX8: [[MUL4:%[0-9]+]]:_(s32) = G_MUL [[UV8]], [[UV11]]
; GFX8: [[UMULH4:%[0-9]+]]:_(s32) = G_UMULH [[UV8]], [[UV10]]
; GFX8: [[UADDO10:%[0-9]+]]:_(s32), [[UADDO11:%[0-9]+]]:_(s1) = G_UADDO [[MUL3]], [[MUL4]]
; GFX8: [[ZEXT5:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO11]](s1)
; GFX8: [[UADDO12:%[0-9]+]]:_(s32), [[UADDO13:%[0-9]+]]:_(s1) = G_UADDO [[UADDO10]], [[UMULH4]]
; GFX8: [[ZEXT6:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO13]](s1)
; GFX8: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[ZEXT5]], [[ZEXT6]]
; GFX8: [[MUL5:%[0-9]+]]:_(s32) = G_MUL [[UV9]], [[UV11]]
; GFX8: [[UMULH5:%[0-9]+]]:_(s32) = G_UMULH [[UV9]], [[UV10]]
; GFX8: [[UMULH6:%[0-9]+]]:_(s32) = G_UMULH [[UV8]], [[UV11]]
; GFX8: [[UADDO14:%[0-9]+]]:_(s32), [[UADDO15:%[0-9]+]]:_(s1) = G_UADDO [[MUL5]], [[UMULH5]]
; GFX8: [[ZEXT7:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO15]](s1)
; GFX8: [[UADDO16:%[0-9]+]]:_(s32), [[UADDO17:%[0-9]+]]:_(s1) = G_UADDO [[UADDO14]], [[UMULH6]]
; GFX8: [[ZEXT8:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO17]](s1)
; GFX8: [[ADD5:%[0-9]+]]:_(s32) = G_ADD [[ZEXT7]], [[ZEXT8]]
; GFX8: [[UADDO18:%[0-9]+]]:_(s32), [[UADDO19:%[0-9]+]]:_(s1) = G_UADDO [[UADDO16]], [[ADD4]]
; GFX8: [[ZEXT9:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO19]](s1)
; GFX8: [[ADD6:%[0-9]+]]:_(s32) = G_ADD [[ADD5]], [[ZEXT9]]
; GFX8: [[UMULH7:%[0-9]+]]:_(s32) = G_UMULH [[UV9]], [[UV11]]
; GFX8: [[ADD7:%[0-9]+]]:_(s32) = G_ADD [[UMULH7]], [[ADD6]]
; GFX8: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO18]](s32), [[ADD7]](s32)
; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64)
; GFX8: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
; GFX9-LABEL: name: test_umulh_v2s64
; GFX9: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
; GFX9: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
; GFX9: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
; GFX9: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY1]](<2 x s64>)
; GFX9: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV]](s64)
; GFX9: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV2]](s64)
; GFX9: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[UV5]], [[UV6]]
; GFX9: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UV4]], [[UV7]]
; GFX9: [[UMULH:%[0-9]+]]:_(s32) = G_UMULH [[UV4]], [[UV6]]
; GFX9: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[MUL]], [[MUL1]]
; GFX9: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO1]](s1)
; GFX9: [[UADDO2:%[0-9]+]]:_(s32), [[UADDO3:%[0-9]+]]:_(s1) = G_UADDO [[UADDO]], [[UMULH]]
; GFX9: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO3]](s1)
; GFX9: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[ZEXT]], [[ZEXT1]]
; GFX9: [[MUL2:%[0-9]+]]:_(s32) = G_MUL [[UV5]], [[UV7]]
; GFX9: [[UMULH1:%[0-9]+]]:_(s32) = G_UMULH [[UV5]], [[UV6]]
; GFX9: [[UMULH2:%[0-9]+]]:_(s32) = G_UMULH [[UV4]], [[UV7]]
; GFX9: [[UADDO4:%[0-9]+]]:_(s32), [[UADDO5:%[0-9]+]]:_(s1) = G_UADDO [[MUL2]], [[UMULH1]]
; GFX9: [[ZEXT2:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO5]](s1)
; GFX9: [[UADDO6:%[0-9]+]]:_(s32), [[UADDO7:%[0-9]+]]:_(s1) = G_UADDO [[UADDO4]], [[UMULH2]]
; GFX9: [[ZEXT3:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO7]](s1)
; GFX9: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ZEXT2]], [[ZEXT3]]
; GFX9: [[UADDO8:%[0-9]+]]:_(s32), [[UADDO9:%[0-9]+]]:_(s1) = G_UADDO [[UADDO6]], [[ADD]]
; GFX9: [[ZEXT4:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO9]](s1)
; GFX9: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[ZEXT4]]
; GFX9: [[UMULH3:%[0-9]+]]:_(s32) = G_UMULH [[UV5]], [[UV7]]
; GFX9: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[UMULH3]], [[ADD2]]
; GFX9: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO8]](s32), [[ADD3]](s32)
; GFX9: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64)
; GFX9: [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV3]](s64)
; GFX9: [[MUL3:%[0-9]+]]:_(s32) = G_MUL [[UV9]], [[UV10]]
; GFX9: [[MUL4:%[0-9]+]]:_(s32) = G_MUL [[UV8]], [[UV11]]
; GFX9: [[UMULH4:%[0-9]+]]:_(s32) = G_UMULH [[UV8]], [[UV10]]
; GFX9: [[UADDO10:%[0-9]+]]:_(s32), [[UADDO11:%[0-9]+]]:_(s1) = G_UADDO [[MUL3]], [[MUL4]]
; GFX9: [[ZEXT5:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO11]](s1)
; GFX9: [[UADDO12:%[0-9]+]]:_(s32), [[UADDO13:%[0-9]+]]:_(s1) = G_UADDO [[UADDO10]], [[UMULH4]]
; GFX9: [[ZEXT6:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO13]](s1)
; GFX9: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[ZEXT5]], [[ZEXT6]]
; GFX9: [[MUL5:%[0-9]+]]:_(s32) = G_MUL [[UV9]], [[UV11]]
; GFX9: [[UMULH5:%[0-9]+]]:_(s32) = G_UMULH [[UV9]], [[UV10]]
; GFX9: [[UMULH6:%[0-9]+]]:_(s32) = G_UMULH [[UV8]], [[UV11]]
; GFX9: [[UADDO14:%[0-9]+]]:_(s32), [[UADDO15:%[0-9]+]]:_(s1) = G_UADDO [[MUL5]], [[UMULH5]]
; GFX9: [[ZEXT7:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO15]](s1)
; GFX9: [[UADDO16:%[0-9]+]]:_(s32), [[UADDO17:%[0-9]+]]:_(s1) = G_UADDO [[UADDO14]], [[UMULH6]]
; GFX9: [[ZEXT8:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO17]](s1)
; GFX9: [[ADD5:%[0-9]+]]:_(s32) = G_ADD [[ZEXT7]], [[ZEXT8]]
; GFX9: [[UADDO18:%[0-9]+]]:_(s32), [[UADDO19:%[0-9]+]]:_(s1) = G_UADDO [[UADDO16]], [[ADD4]]
; GFX9: [[ZEXT9:%[0-9]+]]:_(s32) = G_ZEXT [[UADDO19]](s1)
; GFX9: [[ADD6:%[0-9]+]]:_(s32) = G_ADD [[ADD5]], [[ZEXT9]]
; GFX9: [[UMULH7:%[0-9]+]]:_(s32) = G_UMULH [[UV9]], [[UV11]]
; GFX9: [[ADD7:%[0-9]+]]:_(s32) = G_ADD [[UMULH7]], [[ADD6]]
; GFX9: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO18]](s32), [[ADD7]](s32)
; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64)
; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
%0:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
%1:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
%2:_(<2 x s64>) = G_UMULH %0, %1
$vgpr0_vgpr1_vgpr2_vgpr3 = COPY %2
...
---
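# s16 is expected to be lowered in s32: mask both operands to 16 bits, multiply, shift right
# by 16, and mask again for the zero-extended result.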
name: test_umulh_s16
body: |
bb.0:
liveins: $vgpr0, $vgpr1
; GFX8-LABEL: name: test_umulh_s16
; GFX8: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX8: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; GFX8: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
; GFX8: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]]
; GFX8: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND]], [[AND1]]
; GFX8: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX8: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C1]](s32)
; GFX8: [[AND2:%[0-9]+]]:_(s32) = G_AND [[LSHR]], [[C]]
; GFX8: $vgpr0 = COPY [[AND2]](s32)
; GFX9-LABEL: name: test_umulh_s16
; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
; GFX9: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]]
; GFX9: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND]], [[AND1]]
; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C1]](s32)
; GFX9: [[AND2:%[0-9]+]]:_(s32) = G_AND [[LSHR]], [[C]]
; GFX9: $vgpr0 = COPY [[AND2]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s16) = G_TRUNC %0
%3:_(s16) = G_TRUNC %1
%4:_(s16) = G_UMULH %2, %3
%5:_(s32) = G_ZEXT %4
$vgpr0 = COPY %5
...
---
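# s8 is expected to be lowered in s16: mask both operands to 8 bits, multiply, shift right by 8,
# then any-extend and mask to produce the zero-extended s32 result.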
name: test_umulh_s8
body: |
bb.0:
liveins: $vgpr0, $vgpr1
; GFX8-LABEL: name: test_umulh_s8
; GFX8: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX8: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX8: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
; GFX8: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; GFX8: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C]]
; GFX8: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
; GFX8: [[AND1:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C]]
; GFX8: [[MUL:%[0-9]+]]:_(s16) = G_MUL [[AND]], [[AND1]]
; GFX8: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
; GFX8: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[MUL]], [[C1]](s16)
; GFX8: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; GFX8: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR]](s16)
; GFX8: [[AND2:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C2]]
; GFX8: $vgpr0 = COPY [[AND2]](s32)
; GFX9-LABEL: name: test_umulh_s8
; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; GFX9: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C]]
; GFX9: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
; GFX9: [[AND1:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C]]
; GFX9: [[MUL:%[0-9]+]]:_(s16) = G_MUL [[AND]], [[AND1]]
; GFX9: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
; GFX9: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[MUL]], [[C1]](s16)
; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR]](s16)
; GFX9: [[AND2:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C2]]
; GFX9: $vgpr0 = COPY [[AND2]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s8) = G_TRUNC %0
%3:_(s8) = G_TRUNC %1
%4:_(s8) = G_UMULH %2, %3
%5:_(s32) = G_ZEXT %4
$vgpr0 = COPY %5
...
---
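# <2 x s16> is expected to be lowered per element in s32 (mask to 16 bits, multiply, shift right
# by 16); GFX9 repacks the halves with G_BUILD_VECTOR_TRUNC, GFX8 with shift/or and a bitcast.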
name: test_umulh_v2s16
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
; GFX8-LABEL: name: test_umulh_v2s16
; GFX8: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
; GFX8: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
; GFX8: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
; GFX8: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; GFX8: [[AND:%[0-9]+]]:_(s32) = G_AND [[UV]], [[C]]
; GFX8: [[AND1:%[0-9]+]]:_(s32) = G_AND [[UV2]], [[C]]
; GFX8: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND]], [[AND1]]
; GFX8: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX8: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C1]](s32)
; GFX8: [[AND2:%[0-9]+]]:_(s32) = G_AND [[UV1]], [[C]]
; GFX8: [[AND3:%[0-9]+]]:_(s32) = G_AND [[UV3]], [[C]]
; GFX8: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[AND2]], [[AND3]]
; GFX8: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[MUL1]], [[C1]](s32)
; GFX8: [[AND4:%[0-9]+]]:_(s32) = G_AND [[LSHR]], [[C]]
; GFX8: [[AND5:%[0-9]+]]:_(s32) = G_AND [[LSHR1]], [[C]]
; GFX8: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND5]], [[C1]](s32)
; GFX8: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND4]], [[SHL]]
; GFX8: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32)
; GFX8: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[BITCAST]](<2 x s16>)
; GFX8: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C1]](s32)
; GFX8: [[AND6:%[0-9]+]]:_(s32) = G_AND [[BITCAST1]], [[C]]
; GFX8: [[AND7:%[0-9]+]]:_(s32) = G_AND [[LSHR2]], [[C]]
; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[AND6]](s32), [[AND7]](s32)
; GFX8: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
; GFX9-LABEL: name: test_umulh_v2s16
; GFX9: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
; GFX9: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
; GFX9: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[UV]], [[C]]
; GFX9: [[AND1:%[0-9]+]]:_(s32) = G_AND [[UV2]], [[C]]
; GFX9: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND]], [[AND1]]
; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[MUL]], [[C1]](s32)
; GFX9: [[AND2:%[0-9]+]]:_(s32) = G_AND [[UV1]], [[C]]
; GFX9: [[AND3:%[0-9]+]]:_(s32) = G_AND [[UV3]], [[C]]
; GFX9: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[AND2]], [[AND3]]
; GFX9: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[MUL1]], [[C1]](s32)
; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[LSHR]](s32), [[LSHR1]](s32)
; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[BUILD_VECTOR_TRUNC]](<2 x s16>)
; GFX9: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C1]](s32)
; GFX9: [[AND4:%[0-9]+]]:_(s32) = G_AND [[BITCAST]], [[C]]
; GFX9: [[AND5:%[0-9]+]]:_(s32) = G_AND [[LSHR2]], [[C]]
; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[AND4]](s32), [[AND5]](s32)
; GFX9: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
%0:_(<2 x s32>) = COPY $vgpr0_vgpr1
%1:_(<2 x s32>) = COPY $vgpr2_vgpr3
%2:_(<2 x s16>) = G_TRUNC %0
%3:_(<2 x s16>) = G_TRUNC %1
%4:_(<2 x s16>) = G_UMULH %2, %3
%5:_(<2 x s32>) = G_ZEXT %4
$vgpr0_vgpr1 = COPY %5
...
---
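# <3 x s8> exercises an odd vector width: GFX8 lowers each element in s16, while GFX9 packs the
# elements (padded with an implicit def) into <2 x s16> G_MUL/G_LSHR before repacking the s8 results.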
name: test_umulh_v3s8
body: |
bb.0:
liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
; GFX8-LABEL: name: test_umulh_v3s8
; GFX8: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX8: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX8: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX8: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX8: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX8: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX8: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
; GFX8: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; GFX8: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C]]
; GFX8: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY3]](s32)
; GFX8: [[AND1:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C]]
; GFX8: [[MUL:%[0-9]+]]:_(s16) = G_MUL [[AND]], [[AND1]]
; GFX8: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
; GFX8: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[MUL]], [[C1]](s16)
; GFX8: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
; GFX8: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C]]
; GFX8: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY4]](s32)
; GFX8: [[AND3:%[0-9]+]]:_(s16) = G_AND [[TRUNC3]], [[C]]
; GFX8: [[MUL1:%[0-9]+]]:_(s16) = G_MUL [[AND2]], [[AND3]]
; GFX8: [[LSHR1:%[0-9]+]]:_(s16) = G_LSHR [[MUL1]], [[C1]](s16)
; GFX8: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
; GFX8: [[AND4:%[0-9]+]]:_(s16) = G_AND [[TRUNC4]], [[C]]
; GFX8: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY5]](s32)
; GFX8: [[AND5:%[0-9]+]]:_(s16) = G_AND [[TRUNC5]], [[C]]
; GFX8: [[MUL2:%[0-9]+]]:_(s16) = G_MUL [[AND4]], [[AND5]]
; GFX8: [[LSHR2:%[0-9]+]]:_(s16) = G_LSHR [[MUL2]], [[C1]](s16)
; GFX8: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; GFX8: [[AND6:%[0-9]+]]:_(s16) = G_AND [[LSHR]], [[C]]
; GFX8: [[AND7:%[0-9]+]]:_(s16) = G_AND [[LSHR1]], [[C]]
; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[AND7]], [[C1]](s16)
; GFX8: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND6]], [[SHL]]
; GFX8: [[AND8:%[0-9]+]]:_(s16) = G_AND [[LSHR2]], [[C]]
; GFX8: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[DEF]](s32)
; GFX8: [[AND9:%[0-9]+]]:_(s16) = G_AND [[TRUNC6]], [[C]]
; GFX8: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[AND9]], [[C1]](s16)
; GFX8: [[OR1:%[0-9]+]]:_(s16) = G_OR [[AND8]], [[SHL1]]
; GFX8: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[OR]](s16)
; GFX8: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[OR1]](s16)
; GFX8: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX8: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[ZEXT1]], [[C2]](s32)
; GFX8: [[OR2:%[0-9]+]]:_(s32) = G_OR [[ZEXT]], [[SHL2]]
; GFX8: $vgpr0 = COPY [[OR2]](s32)
; GFX9-LABEL: name: test_umulh_v3s8
; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX9: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; GFX9: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
; GFX9: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
; GFX9: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[DEF]](s32)
; GFX9: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY3]](s32)
; GFX9: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY4]](s32)
; GFX9: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY5]](s32)
; GFX9: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[DEF]](s32)
; GFX9: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
; GFX9: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C]]
; GFX9: [[AND1:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C]]
; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[AND]](s16)
; GFX9: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[AND1]](s16)
; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[ANYEXT]](s32), [[ANYEXT1]](s32)
; GFX9: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC4]], [[C]]
; GFX9: [[AND3:%[0-9]+]]:_(s16) = G_AND [[TRUNC5]], [[C]]
; GFX9: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[AND2]](s16)
; GFX9: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[AND3]](s16)
; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[ANYEXT2]](s32), [[ANYEXT3]](s32)
; GFX9: [[MUL:%[0-9]+]]:_(<2 x s16>) = G_MUL [[BUILD_VECTOR_TRUNC]], [[BUILD_VECTOR_TRUNC1]]
; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
; GFX9: [[BUILD_VECTOR_TRUNC2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY6]](s32), [[C1]](s32)
; GFX9: [[LSHR:%[0-9]+]]:_(<2 x s16>) = G_LSHR [[MUL]], [[BUILD_VECTOR_TRUNC2]](<2 x s16>)
; GFX9: [[AND4:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C]]
; GFX9: [[AND5:%[0-9]+]]:_(s16) = G_AND [[TRUNC3]], [[C]]
; GFX9: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[AND4]](s16)
; GFX9: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[AND5]](s16)
; GFX9: [[BUILD_VECTOR_TRUNC3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[ANYEXT4]](s32), [[ANYEXT5]](s32)
; GFX9: [[AND6:%[0-9]+]]:_(s16) = G_AND [[TRUNC6]], [[C]]
; GFX9: [[AND7:%[0-9]+]]:_(s16) = G_AND [[TRUNC7]], [[C]]
; GFX9: [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[AND6]](s16)
; GFX9: [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[AND7]](s16)
; GFX9: [[BUILD_VECTOR_TRUNC4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[ANYEXT6]](s32), [[ANYEXT7]](s32)
; GFX9: [[MUL1:%[0-9]+]]:_(<2 x s16>) = G_MUL [[BUILD_VECTOR_TRUNC3]], [[BUILD_VECTOR_TRUNC4]]
; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
; GFX9: [[BUILD_VECTOR_TRUNC5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY7]](s32), [[COPY8]](s32)
; GFX9: [[LSHR1:%[0-9]+]]:_(<2 x s16>) = G_LSHR [[MUL1]], [[BUILD_VECTOR_TRUNC5]](<2 x s16>)
; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[LSHR]](<2 x s16>)
; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C2]](s32)
; GFX9: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[LSHR1]](<2 x s16>)
; GFX9: [[DEF1:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; GFX9: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
; GFX9: [[AND8:%[0-9]+]]:_(s16) = G_AND [[TRUNC8]], [[C]]
; GFX9: [[TRUNC9:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR2]](s32)
; GFX9: [[AND9:%[0-9]+]]:_(s16) = G_AND [[TRUNC9]], [[C]]
; GFX9: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[AND9]], [[C3]](s16)
; GFX9: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND8]], [[SHL]]
; GFX9: [[TRUNC10:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
; GFX9: [[AND10:%[0-9]+]]:_(s16) = G_AND [[TRUNC10]], [[C]]
; GFX9: [[TRUNC11:%[0-9]+]]:_(s16) = G_TRUNC [[DEF1]](s32)
; GFX9: [[AND11:%[0-9]+]]:_(s16) = G_AND [[TRUNC11]], [[C]]
; GFX9: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[AND11]], [[C3]](s16)
; GFX9: [[OR1:%[0-9]+]]:_(s16) = G_OR [[AND10]], [[SHL1]]
; GFX9: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[OR]](s16)
; GFX9: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[OR1]](s16)
; GFX9: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[ZEXT1]], [[C2]](s32)
; GFX9: [[OR2:%[0-9]+]]:_(s32) = G_OR [[ZEXT]], [[SHL2]]
; GFX9: $vgpr0 = COPY [[OR2]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s32) = COPY $vgpr2
%3:_(s32) = COPY $vgpr3
%4:_(s32) = COPY $vgpr4
%20:_(s32) = COPY $vgpr5
%5:_(s8) = G_TRUNC %0
%6:_(s8) = G_TRUNC %1
%7:_(s8) = G_TRUNC %2
%8:_(s8) = G_TRUNC %3
%9:_(s8) = G_TRUNC %4
%10:_(s8) = G_TRUNC %20
%11:_(<3 x s8>) = G_BUILD_VECTOR %5, %6, %7
%12:_(<3 x s8>) = G_BUILD_VECTOR %8, %9, %10
%13:_(<3 x s8>) = G_UMULH %11, %12
%14:_(s8), %15:_(s8), %16:_(s8) = G_UNMERGE_VALUES %13
%17:_(s24) = G_MERGE_VALUES %14, %15, %16
%18:_(s32) = G_ANYEXT %17
$vgpr0 = COPY %18
...
---
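# <2 x s8>: GFX8 lowers each element in s16; GFX9 widens both elements into a single <2 x s16>
# G_MUL and shifts by 8 before repacking.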
name: test_umulh_v2s8
body: |
bb.0:
liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX8-LABEL: name: test_umulh_v2s8
; GFX8: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX8: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX8: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX8: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX8: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
; GFX8: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; GFX8: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C]]
; GFX8: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
; GFX8: [[AND1:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C]]
; GFX8: [[MUL:%[0-9]+]]:_(s16) = G_MUL [[AND]], [[AND1]]
; GFX8: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
; GFX8: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[MUL]], [[C1]](s16)
; GFX8: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
; GFX8: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C]]
; GFX8: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY3]](s32)
; GFX8: [[AND3:%[0-9]+]]:_(s16) = G_AND [[TRUNC3]], [[C]]
; GFX8: [[MUL1:%[0-9]+]]:_(s16) = G_MUL [[AND2]], [[AND3]]
; GFX8: [[LSHR1:%[0-9]+]]:_(s16) = G_LSHR [[MUL1]], [[C1]](s16)
; GFX8: [[AND4:%[0-9]+]]:_(s16) = G_AND [[LSHR]], [[C]]
; GFX8: [[AND5:%[0-9]+]]:_(s16) = G_AND [[LSHR1]], [[C]]
; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[AND5]], [[C1]](s16)
; GFX8: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND4]], [[SHL]]
; GFX8: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[OR]](s16)
; GFX8: $vgpr0 = COPY [[ANYEXT]](s32)
; GFX9-LABEL: name: test_umulh_v2s8
; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; GFX9: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
; GFX9: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
; GFX9: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY3]](s32)
; GFX9: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
; GFX9: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C]]
; GFX9: [[AND1:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C]]
; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[AND]](s16)
; GFX9: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[AND1]](s16)
; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[ANYEXT]](s32), [[ANYEXT1]](s32)
; GFX9: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C]]
; GFX9: [[AND3:%[0-9]+]]:_(s16) = G_AND [[TRUNC3]], [[C]]
; GFX9: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[AND2]](s16)
; GFX9: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[AND3]](s16)
; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[ANYEXT2]](s32), [[ANYEXT3]](s32)
; GFX9: [[MUL:%[0-9]+]]:_(<2 x s16>) = G_MUL [[BUILD_VECTOR_TRUNC]], [[BUILD_VECTOR_TRUNC1]]
; GFX9: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; GFX9: [[BUILD_VECTOR_TRUNC2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY4]](s32), [[C2]](s32)
; GFX9: [[LSHR:%[0-9]+]]:_(<2 x s16>) = G_LSHR [[MUL]], [[BUILD_VECTOR_TRUNC2]](<2 x s16>)
; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[LSHR]](<2 x s16>)
; GFX9: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C3]](s32)
; GFX9: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
; GFX9: [[AND4:%[0-9]+]]:_(s16) = G_AND [[TRUNC4]], [[C]]
; GFX9: [[AND5:%[0-9]+]]:_(s16) = G_AND [[TRUNC5]], [[C]]
; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[AND5]], [[C1]](s16)
; GFX9: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND4]], [[SHL]]
; GFX9: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[OR]](s16)
; GFX9: $vgpr0 = COPY [[ANYEXT4]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s32) = COPY $vgpr2
%3:_(s32) = COPY $vgpr3
%5:_(s8) = G_TRUNC %0
%6:_(s8) = G_TRUNC %1
%7:_(s8) = G_TRUNC %2
%8:_(s8) = G_TRUNC %3
%11:_(<2 x s8>) = G_BUILD_VECTOR %5, %6
%12:_(<2 x s8>) = G_BUILD_VECTOR %7, %8
%13:_(<2 x s8>) = G_UMULH %11, %12
%14:_(s8), %15:_(s8) = G_UNMERGE_VALUES %13
%17:_(s16) = G_MERGE_VALUES %14, %15
%18:_(s32) = G_ANYEXT %17
$vgpr0 = COPY %18
...
---
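# <4 x s8> packed in an s32: GFX8 lowers each byte in s16; GFX9 uses two <2 x s16> multiplies;
# both repack the byte results into the output s32 with shift/or.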
name: test_umulh_v4s8
body: |
bb.0:
liveins: $vgpr0, $vgpr1
; GFX8-LABEL: name: test_umulh_v4s8
; GFX8: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX8: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX8: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C]](s32)
; GFX8: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX8: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C1]](s32)
; GFX8: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
; GFX8: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C2]](s32)
; GFX8: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C]](s32)
; GFX8: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C1]](s32)
; GFX8: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C2]](s32)
; GFX8: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
; GFX8: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; GFX8: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C3]]
; GFX8: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
; GFX8: [[AND1:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C3]]
; GFX8: [[MUL:%[0-9]+]]:_(s16) = G_MUL [[AND]], [[AND1]]
; GFX8: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
; GFX8: [[LSHR6:%[0-9]+]]:_(s16) = G_LSHR [[MUL]], [[C4]](s16)
; GFX8: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
; GFX8: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C3]]
; GFX8: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR3]](s32)
; GFX8: [[AND3:%[0-9]+]]:_(s16) = G_AND [[TRUNC3]], [[C3]]
; GFX8: [[MUL1:%[0-9]+]]:_(s16) = G_MUL [[AND2]], [[AND3]]
; GFX8: [[LSHR7:%[0-9]+]]:_(s16) = G_LSHR [[MUL1]], [[C4]](s16)
; GFX8: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
; GFX8: [[AND4:%[0-9]+]]:_(s16) = G_AND [[TRUNC4]], [[C3]]
; GFX8: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR4]](s32)
; GFX8: [[AND5:%[0-9]+]]:_(s16) = G_AND [[TRUNC5]], [[C3]]
; GFX8: [[MUL2:%[0-9]+]]:_(s16) = G_MUL [[AND4]], [[AND5]]
; GFX8: [[LSHR8:%[0-9]+]]:_(s16) = G_LSHR [[MUL2]], [[C4]](s16)
; GFX8: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR2]](s32)
; GFX8: [[AND6:%[0-9]+]]:_(s16) = G_AND [[TRUNC6]], [[C3]]
; GFX8: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR5]](s32)
; GFX8: [[AND7:%[0-9]+]]:_(s16) = G_AND [[TRUNC7]], [[C3]]
; GFX8: [[MUL3:%[0-9]+]]:_(s16) = G_MUL [[AND6]], [[AND7]]
; GFX8: [[LSHR9:%[0-9]+]]:_(s16) = G_LSHR [[MUL3]], [[C4]](s16)
; GFX8: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; GFX8: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR6]](s16)
; GFX8: [[AND8:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C5]]
; GFX8: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR7]](s16)
; GFX8: [[AND9:%[0-9]+]]:_(s32) = G_AND [[ANYEXT1]], [[C5]]
; GFX8: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND9]], [[C]](s32)
; GFX8: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND8]], [[SHL]]
; GFX8: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR8]](s16)
; GFX8: [[AND10:%[0-9]+]]:_(s32) = G_AND [[ANYEXT2]], [[C5]]
; GFX8: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[AND10]], [[C1]](s32)
; GFX8: [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
; GFX8: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR9]](s16)
; GFX8: [[AND11:%[0-9]+]]:_(s32) = G_AND [[ANYEXT3]], [[C5]]
; GFX8: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[AND11]], [[C2]](s32)
; GFX8: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]]
; GFX8: $vgpr0 = COPY [[OR2]](s32)
; GFX9-LABEL: name: test_umulh_v4s8
; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C]](s32)
; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C1]](s32)
; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
; GFX9: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C2]](s32)
; GFX9: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C]](s32)
; GFX9: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C1]](s32)
; GFX9: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C2]](s32)
; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; GFX9: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
; GFX9: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
; GFX9: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR2]](s32)
; GFX9: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
; GFX9: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR3]](s32)
; GFX9: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR4]](s32)
; GFX9: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR5]](s32)
; GFX9: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
; GFX9: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C3]]
; GFX9: [[AND1:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C3]]
; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[AND]](s16)
; GFX9: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[AND1]](s16)
; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[ANYEXT]](s32), [[ANYEXT1]](s32)
; GFX9: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC4]], [[C3]]
; GFX9: [[AND3:%[0-9]+]]:_(s16) = G_AND [[TRUNC5]], [[C3]]
; GFX9: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[AND2]](s16)
; GFX9: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[AND3]](s16)
; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[ANYEXT2]](s32), [[ANYEXT3]](s32)
; GFX9: [[MUL:%[0-9]+]]:_(<2 x s16>) = G_MUL [[BUILD_VECTOR_TRUNC]], [[BUILD_VECTOR_TRUNC1]]
; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C]](s32)
; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY [[C]](s32)
; GFX9: [[BUILD_VECTOR_TRUNC2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY2]](s32), [[COPY3]](s32)
; GFX9: [[LSHR6:%[0-9]+]]:_(<2 x s16>) = G_LSHR [[MUL]], [[BUILD_VECTOR_TRUNC2]](<2 x s16>)
; GFX9: [[AND4:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C3]]
; GFX9: [[AND5:%[0-9]+]]:_(s16) = G_AND [[TRUNC3]], [[C3]]
; GFX9: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[AND4]](s16)
; GFX9: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[AND5]](s16)
; GFX9: [[BUILD_VECTOR_TRUNC3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[ANYEXT4]](s32), [[ANYEXT5]](s32)
; GFX9: [[AND6:%[0-9]+]]:_(s16) = G_AND [[TRUNC6]], [[C3]]
; GFX9: [[AND7:%[0-9]+]]:_(s16) = G_AND [[TRUNC7]], [[C3]]
; GFX9: [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[AND6]](s16)
; GFX9: [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[AND7]](s16)
; GFX9: [[BUILD_VECTOR_TRUNC4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[ANYEXT6]](s32), [[ANYEXT7]](s32)
; GFX9: [[MUL1:%[0-9]+]]:_(<2 x s16>) = G_MUL [[BUILD_VECTOR_TRUNC3]], [[BUILD_VECTOR_TRUNC4]]
; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY [[C]](s32)
; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY [[C]](s32)
; GFX9: [[BUILD_VECTOR_TRUNC5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY4]](s32), [[COPY5]](s32)
; GFX9: [[LSHR7:%[0-9]+]]:_(<2 x s16>) = G_LSHR [[MUL1]], [[BUILD_VECTOR_TRUNC5]](<2 x s16>)
; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[LSHR6]](<2 x s16>)
; GFX9: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C1]](s32)
; GFX9: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[LSHR7]](<2 x s16>)
; GFX9: [[LSHR9:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C1]](s32)
; GFX9: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; GFX9: [[AND8:%[0-9]+]]:_(s32) = G_AND [[BITCAST]], [[C4]]
; GFX9: [[AND9:%[0-9]+]]:_(s32) = G_AND [[LSHR8]], [[C4]]
; GFX9: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND9]], [[C]](s32)
; GFX9: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND8]], [[SHL]]
; GFX9: [[AND10:%[0-9]+]]:_(s32) = G_AND [[BITCAST1]], [[C4]]
; GFX9: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[AND10]], [[C1]](s32)
; GFX9: [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
; GFX9: [[AND11:%[0-9]+]]:_(s32) = G_AND [[LSHR9]], [[C4]]
; GFX9: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[AND11]], [[C2]](s32)
; GFX9: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]]
; GFX9: $vgpr0 = COPY [[OR2]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s8), %3:_(s8), %4:_(s8), %5:_(s8) = G_UNMERGE_VALUES %0
%6:_(s8), %7:_(s8), %8:_(s8), %9:_(s8) = G_UNMERGE_VALUES %1
%10:_(<4 x s8>) = G_BUILD_VECTOR %2:_(s8), %3:_(s8), %4:_(s8), %5:_(s8)
%11:_(<4 x s8>) = G_BUILD_VECTOR %6:_(s8), %7:_(s8), %8:_(s8), %9:_(s8)
%12:_(<4 x s8>) = G_UMULH %10:_, %11:_
%13:_(s8), %14:_(s8), %15:_(s8), %16:_(s8) = G_UNMERGE_VALUES %12:_(<4 x s8>)
%17:_(s32) = G_MERGE_VALUES %13, %14, %15, %16
$vgpr0 = COPY %17
...