; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -global-isel -global-isel-abort=0 -stop-after=irtranslator -mtriple=amdgcn-mesa-mesa3d -verify-machineinstrs -o - %s | FileCheck -enable-var-scope %s
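
; Checks how the IRTranslator lowers function return values of various
; types for the AMDGPU calling convention. A plain i1 return is
; any-extended (G_ANYEXT) to 32 bits and returned in $vgpr0.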
define i1 @i1_func_void() #0 {
; CHECK-LABEL: name: i1_func_void
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: [[LOAD:%[0-9]+]]:_(s1) = G_LOAD [[DEF]](p1) :: (load 1 from `i1 addrspace(1)* undef`, addrspace 1)
; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD]](s1)
; CHECK: $vgpr0 = COPY [[ANYEXT]](s32)
; CHECK: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
; CHECK: S_SETPC_B64_return [[COPY1]], implicit $vgpr0
%val = load i1, i1 addrspace(1)* undef
ret i1 %val
}
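
; With the zeroext/signext attributes, the i1 return is zero- or
; sign-extended (G_ZEXT/G_SEXT) to 32 bits instead of any-extended.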
define zeroext i1 @i1_zeroext_func_void() #0 {
; CHECK-LABEL: name: i1_zeroext_func_void
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: [[LOAD:%[0-9]+]]:_(s1) = G_LOAD [[DEF]](p1) :: (load 1 from `i1 addrspace(1)* undef`, addrspace 1)
; CHECK: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD]](s1)
; CHECK: $vgpr0 = COPY [[ZEXT]](s32)
; CHECK: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
; CHECK: S_SETPC_B64_return [[COPY1]], implicit $vgpr0
%val = load i1, i1 addrspace(1)* undef
ret i1 %val
}
define signext i1 @i1_signext_func_void() #0 {
; CHECK-LABEL: name: i1_signext_func_void
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: [[LOAD:%[0-9]+]]:_(s1) = G_LOAD [[DEF]](p1) :: (load 1 from `i1 addrspace(1)* undef`, addrspace 1)
; CHECK: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[LOAD]](s1)
; CHECK: $vgpr0 = COPY [[SEXT]](s32)
; CHECK: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
; CHECK: S_SETPC_B64_return [[COPY1]], implicit $vgpr0
%val = load i1, i1 addrspace(1)* undef
ret i1 %val
}
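
; The i8 cases below only check up to the load: the bb.0/bb.1 split and
; the missing copy to $vgpr0 suggest return lowering of s8 failed here,
; so with -global-isel-abort=0 the function falls back to the DAG path.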
define i8 @i8_func_void() #0 {
; CHECK-LABEL: name: i8_func_void
; CHECK: bb.0:
; CHECK: successors: %bb.1(0x80000000)
; CHECK: liveins: $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: bb.1 (%ir-block.0):
; CHECK: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p1) :: (load 1 from `i8 addrspace(1)* undef`, addrspace 1)
%val = load i8, i8 addrspace(1)* undef
ret i8 %val
}
define zeroext i8 @i8_zeroext_func_void() #0 {
; CHECK-LABEL: name: i8_zeroext_func_void
; CHECK: bb.0:
; CHECK: successors: %bb.1(0x80000000)
; CHECK: liveins: $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: bb.1 (%ir-block.0):
; CHECK: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p1) :: (load 1 from `i8 addrspace(1)* undef`, addrspace 1)
%val = load i8, i8 addrspace(1)* undef
ret i8 %val
}
define signext i8 @i8_signext_func_void() #0 {
; CHECK-LABEL: name: i8_signext_func_void
; CHECK: bb.0:
; CHECK: successors: %bb.1(0x80000000)
; CHECK: liveins: $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: bb.1 (%ir-block.0):
; CHECK: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p1) :: (load 1 from `i8 addrspace(1)* undef`, addrspace 1)
%val = load i8, i8 addrspace(1)* undef
ret i8 %val
}
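
; i16 returns are widened to 32 bits like i1, again honoring the
; zeroext/signext attributes in the two variants that follow.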
define i16 @i16_func_void() #0 {
; CHECK-LABEL: name: i16_func_void
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[DEF]](p1) :: (load 2 from `i16 addrspace(1)* undef`, addrspace 1)
; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD]](s16)
; CHECK: $vgpr0 = COPY [[ANYEXT]](s32)
; CHECK: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
; CHECK: S_SETPC_B64_return [[COPY1]], implicit $vgpr0
%val = load i16, i16 addrspace(1)* undef
ret i16 %val
}
define zeroext i16 @i16_zeroext_func_void() #0 {
; CHECK-LABEL: name: i16_zeroext_func_void
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[DEF]](p1) :: (load 2 from `i16 addrspace(1)* undef`, addrspace 1)
; CHECK: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD]](s16)
; CHECK: $vgpr0 = COPY [[ZEXT]](s32)
; CHECK: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
; CHECK: S_SETPC_B64_return [[COPY1]], implicit $vgpr0
%val = load i16, i16 addrspace(1)* undef
ret i16 %val
}
define signext i16 @i16_signext_func_void() #0 {
; CHECK-LABEL: name: i16_signext_func_void
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[DEF]](p1) :: (load 2 from `i16 addrspace(1)* undef`, addrspace 1)
; CHECK: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[LOAD]](s16)
; CHECK: $vgpr0 = COPY [[SEXT]](s32)
; CHECK: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
; CHECK: S_SETPC_B64_return [[COPY1]], implicit $vgpr0
%val = load i16, i16 addrspace(1)* undef
ret i16 %val
}
define i32 @i32_func_void() #0 {
; CHECK-LABEL: name: i32_func_void
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p1) :: (load 4 from `i32 addrspace(1)* undef`, addrspace 1)
; CHECK: $vgpr0 = COPY [[LOAD]](s32)
; CHECK: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
; CHECK: S_SETPC_B64_return [[COPY1]], implicit $vgpr0
%val = load i32, i32 addrspace(1)* undef
ret i32 %val
}
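
; Integer types that are not a multiple of 32 bits (i48 here, i65 below)
; are padded with G_INSERT into an implicit def of the next 32-bit
; multiple, then split into 32-bit G_EXTRACT pieces for the return.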
define i48 @i48_func_void() #0 {
; CHECK-LABEL: name: i48_func_void
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: [[LOAD:%[0-9]+]]:_(s48) = G_LOAD [[DEF]](p1) :: (load 6 from `i48 addrspace(1)* undef`, align 8, addrspace 1)
; CHECK: [[DEF1:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
; CHECK: [[INSERT:%[0-9]+]]:_(s64) = G_INSERT [[DEF1]], [[LOAD]](s48), 0
; CHECK: [[EXTRACT:%[0-9]+]]:_(s32) = G_EXTRACT [[INSERT]](s64), 0
; CHECK: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[INSERT]](s64), 32
; CHECK: $vgpr0 = COPY [[EXTRACT]](s32)
; CHECK: $vgpr1 = COPY [[EXTRACT1]](s32)
; CHECK: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
; CHECK: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1
%val = load i48, i48 addrspace(1)* undef, align 8
ret i48 %val
}
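
; 64-bit values are split into two 32-bit halves with G_UNMERGE_VALUES.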
define i64 @i64_func_void() #0 {
; CHECK-LABEL: name: i64_func_void
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[DEF]](p1) :: (load 8 from `i64 addrspace(1)* undef`, addrspace 1)
; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](s64)
; CHECK: $vgpr0 = COPY [[UV]](s32)
; CHECK: $vgpr1 = COPY [[UV1]](s32)
; CHECK: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
; CHECK: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1
%val = load i64, i64 addrspace(1)* undef
ret i64 %val
}
define i65 @i65_func_void() #0 {
; CHECK-LABEL: name: i65_func_void
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: [[LOAD:%[0-9]+]]:_(s65) = G_LOAD [[DEF]](p1) :: (load 9 from `i65 addrspace(1)* undef`, align 8, addrspace 1)
; CHECK: [[DEF1:%[0-9]+]]:_(s96) = G_IMPLICIT_DEF
; CHECK: [[INSERT:%[0-9]+]]:_(s96) = G_INSERT [[DEF1]], [[LOAD]](s65), 0
; CHECK: [[EXTRACT:%[0-9]+]]:_(s32) = G_EXTRACT [[INSERT]](s96), 0
; CHECK: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[INSERT]](s96), 32
; CHECK: [[EXTRACT2:%[0-9]+]]:_(s32) = G_EXTRACT [[INSERT]](s96), 64
; CHECK: $vgpr0 = COPY [[EXTRACT]](s32)
; CHECK: $vgpr1 = COPY [[EXTRACT1]](s32)
; CHECK: $vgpr2 = COPY [[EXTRACT2]](s32)
; CHECK: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
; CHECK: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
%val = load i65, i65 addrspace(1)* undef
ret i65 %val
}
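
; Floating-point returns lower the same way as integers of the same width.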
define float @f32_func_void() #0 {
; CHECK-LABEL: name: f32_func_void
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p1) :: (load 4 from `float addrspace(1)* undef`, addrspace 1)
; CHECK: $vgpr0 = COPY [[LOAD]](s32)
; CHECK: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
; CHECK: S_SETPC_B64_return [[COPY1]], implicit $vgpr0
%val = load float, float addrspace(1)* undef
ret float %val
}
define double @f64_func_void() #0 {
; CHECK-LABEL: name: f64_func_void
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[DEF]](p1) :: (load 8 from `double addrspace(1)* undef`, addrspace 1)
; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](s64)
; CHECK: $vgpr0 = COPY [[UV]](s32)
; CHECK: $vgpr1 = COPY [[UV1]](s32)
; CHECK: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
; CHECK: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1
%val = load double, double addrspace(1)* undef
ret double %val
}
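
; Vector returns are scalarized into 32-bit pieces, one VGPR per piece.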
define <2 x double> @v2f64_func_void() #0 {
; CHECK-LABEL: name: v2f64_func_void
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[DEF]](p1) :: (load 16 from `<2 x double> addrspace(1)* undef`, addrspace 1)
; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<2 x s64>)
; CHECK: $vgpr0 = COPY [[UV]](s32)
; CHECK: $vgpr1 = COPY [[UV1]](s32)
; CHECK: $vgpr2 = COPY [[UV2]](s32)
; CHECK: $vgpr3 = COPY [[UV3]](s32)
; CHECK: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
; CHECK: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
%val = load <2 x double>, <2 x double> addrspace(1)* undef
ret <2 x double> %val
}
define <2 x i32> @v2i32_func_void() #0 {
; CHECK-LABEL: name: v2i32_func_void
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[DEF]](p1) :: (load 8 from `<2 x i32> addrspace(1)* undef`, addrspace 1)
; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<2 x s32>)
; CHECK: $vgpr0 = COPY [[UV]](s32)
; CHECK: $vgpr1 = COPY [[UV1]](s32)
; CHECK: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
; CHECK: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1
%val = load <2 x i32>, <2 x i32> addrspace(1)* undef
ret <2 x i32> %val
}
define <3 x i32> @v3i32_func_void() #0 {
; CHECK-LABEL: name: v3i32_func_void
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: [[LOAD:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[DEF]](p1) :: (load 12 from `<3 x i32> addrspace(1)* undef`, align 16, addrspace 1)
; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<3 x s32>)
; CHECK: $vgpr0 = COPY [[UV]](s32)
; CHECK: $vgpr1 = COPY [[UV1]](s32)
; CHECK: $vgpr2 = COPY [[UV2]](s32)
; CHECK: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
; CHECK: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
%val = load <3 x i32>, <3 x i32> addrspace(1)* undef
ret <3 x i32> %val
}
define <4 x i32> @v4i32_func_void() #0 {
; CHECK-LABEL: name: v4i32_func_void
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[DEF]](p1) :: (load 16 from `<4 x i32> addrspace(1)* undef`, addrspace 1)
; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<4 x s32>)
; CHECK: $vgpr0 = COPY [[UV]](s32)
; CHECK: $vgpr1 = COPY [[UV1]](s32)
; CHECK: $vgpr2 = COPY [[UV2]](s32)
; CHECK: $vgpr3 = COPY [[UV3]](s32)
; CHECK: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
; CHECK: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
%val = load <4 x i32>, <4 x i32> addrspace(1)* undef
ret <4 x i32> %val
}
define <5 x i32> @v5i32_func_void() #0 {
; CHECK-LABEL: name: v5i32_func_void
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: [[LOAD:%[0-9]+]]:_(<5 x s32>) = G_LOAD [[DEF]](p1) :: (volatile load 20 from `<5 x i32> addrspace(1)* undef`, align 32, addrspace 1)
; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<5 x s32>)
; CHECK: $vgpr0 = COPY [[UV]](s32)
; CHECK: $vgpr1 = COPY [[UV1]](s32)
; CHECK: $vgpr2 = COPY [[UV2]](s32)
; CHECK: $vgpr3 = COPY [[UV3]](s32)
; CHECK: $vgpr4 = COPY [[UV4]](s32)
; CHECK: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
; CHECK: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4
%val = load volatile <5 x i32>, <5 x i32> addrspace(1)* undef
ret <5 x i32> %val
}
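
; The wider vector tests first load the result pointer itself (a volatile
; load from a constant-address-space pointer) instead of loading the
; vector directly from undef.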
define <8 x i32> @v8i32_func_void() #0 {
; CHECK-LABEL: name: v8i32_func_void
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
; CHECK: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load 8 from `<8 x i32> addrspace(1)* addrspace(4)* undef`, addrspace 4)
; CHECK: [[LOAD1:%[0-9]+]]:_(<8 x s32>) = G_LOAD [[LOAD]](p1) :: (load 32 from %ir.ptr, addrspace 1)
; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<8 x s32>)
; CHECK: $vgpr0 = COPY [[UV]](s32)
; CHECK: $vgpr1 = COPY [[UV1]](s32)
; CHECK: $vgpr2 = COPY [[UV2]](s32)
; CHECK: $vgpr3 = COPY [[UV3]](s32)
; CHECK: $vgpr4 = COPY [[UV4]](s32)
; CHECK: $vgpr5 = COPY [[UV5]](s32)
; CHECK: $vgpr6 = COPY [[UV6]](s32)
; CHECK: $vgpr7 = COPY [[UV7]](s32)
; CHECK: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
; CHECK: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
%ptr = load volatile <8 x i32> addrspace(1)*, <8 x i32> addrspace(1)* addrspace(4)* undef
%val = load <8 x i32>, <8 x i32> addrspace(1)* %ptr
ret <8 x i32> %val
}
define <16 x i32> @v16i32_func_void() #0 {
; CHECK-LABEL: name: v16i32_func_void
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
; CHECK: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load 8 from `<16 x i32> addrspace(1)* addrspace(4)* undef`, addrspace 4)
; CHECK: [[LOAD1:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[LOAD]](p1) :: (load 64 from %ir.ptr, addrspace 1)
; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<16 x s32>)
; CHECK: $vgpr0 = COPY [[UV]](s32)
; CHECK: $vgpr1 = COPY [[UV1]](s32)
; CHECK: $vgpr2 = COPY [[UV2]](s32)
; CHECK: $vgpr3 = COPY [[UV3]](s32)
; CHECK: $vgpr4 = COPY [[UV4]](s32)
; CHECK: $vgpr5 = COPY [[UV5]](s32)
; CHECK: $vgpr6 = COPY [[UV6]](s32)
; CHECK: $vgpr7 = COPY [[UV7]](s32)
; CHECK: $vgpr8 = COPY [[UV8]](s32)
; CHECK: $vgpr9 = COPY [[UV9]](s32)
; CHECK: $vgpr10 = COPY [[UV10]](s32)
; CHECK: $vgpr11 = COPY [[UV11]](s32)
; CHECK: $vgpr12 = COPY [[UV12]](s32)
; CHECK: $vgpr13 = COPY [[UV13]](s32)
; CHECK: $vgpr14 = COPY [[UV14]](s32)
; CHECK: $vgpr15 = COPY [[UV15]](s32)
; CHECK: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
; CHECK: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15
%ptr = load volatile <16 x i32> addrspace(1)*, <16 x i32> addrspace(1)* addrspace(4)* undef
%val = load <16 x i32>, <16 x i32> addrspace(1)* %ptr
ret <16 x i32> %val
}
define <32 x i32> @v32i32_func_void() #0 {
; CHECK-LABEL: name: v32i32_func_void
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
; CHECK: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load 8 from `<32 x i32> addrspace(1)* addrspace(4)* undef`, addrspace 4)
; CHECK: [[LOAD1:%[0-9]+]]:_(<32 x s32>) = G_LOAD [[LOAD]](p1) :: (load 128 from %ir.ptr, addrspace 1)
; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32), [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32), [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32), [[UV20:%[0-9]+]]:_(s32), [[UV21:%[0-9]+]]:_(s32), [[UV22:%[0-9]+]]:_(s32), [[UV23:%[0-9]+]]:_(s32), [[UV24:%[0-9]+]]:_(s32), [[UV25:%[0-9]+]]:_(s32), [[UV26:%[0-9]+]]:_(s32), [[UV27:%[0-9]+]]:_(s32), [[UV28:%[0-9]+]]:_(s32), [[UV29:%[0-9]+]]:_(s32), [[UV30:%[0-9]+]]:_(s32), [[UV31:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<32 x s32>)
; CHECK: $vgpr0 = COPY [[UV]](s32)
; CHECK: $vgpr1 = COPY [[UV1]](s32)
; CHECK: $vgpr2 = COPY [[UV2]](s32)
; CHECK: $vgpr3 = COPY [[UV3]](s32)
; CHECK: $vgpr4 = COPY [[UV4]](s32)
; CHECK: $vgpr5 = COPY [[UV5]](s32)
; CHECK: $vgpr6 = COPY [[UV6]](s32)
; CHECK: $vgpr7 = COPY [[UV7]](s32)
; CHECK: $vgpr8 = COPY [[UV8]](s32)
; CHECK: $vgpr9 = COPY [[UV9]](s32)
; CHECK: $vgpr10 = COPY [[UV10]](s32)
; CHECK: $vgpr11 = COPY [[UV11]](s32)
; CHECK: $vgpr12 = COPY [[UV12]](s32)
; CHECK: $vgpr13 = COPY [[UV13]](s32)
; CHECK: $vgpr14 = COPY [[UV14]](s32)
; CHECK: $vgpr15 = COPY [[UV15]](s32)
; CHECK: $vgpr16 = COPY [[UV16]](s32)
; CHECK: $vgpr17 = COPY [[UV17]](s32)
; CHECK: $vgpr18 = COPY [[UV18]](s32)
; CHECK: $vgpr19 = COPY [[UV19]](s32)
; CHECK: $vgpr20 = COPY [[UV20]](s32)
; CHECK: $vgpr21 = COPY [[UV21]](s32)
; CHECK: $vgpr22 = COPY [[UV22]](s32)
; CHECK: $vgpr23 = COPY [[UV23]](s32)
; CHECK: $vgpr24 = COPY [[UV24]](s32)
; CHECK: $vgpr25 = COPY [[UV25]](s32)
; CHECK: $vgpr26 = COPY [[UV26]](s32)
; CHECK: $vgpr27 = COPY [[UV27]](s32)
; CHECK: $vgpr28 = COPY [[UV28]](s32)
; CHECK: $vgpr29 = COPY [[UV29]](s32)
; CHECK: $vgpr30 = COPY [[UV30]](s32)
; CHECK: $vgpr31 = COPY [[UV31]](s32)
; CHECK: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
; CHECK: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $vgpr31
%ptr = load volatile <32 x i32> addrspace(1)*, <32 x i32> addrspace(1)* addrspace(4)* undef
%val = load <32 x i32>, <32 x i32> addrspace(1)* %ptr
ret <32 x i32> %val
}
define <2 x i64> @v2i64_func_void() #0 {
; CHECK-LABEL: name: v2i64_func_void
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[DEF]](p1) :: (load 16 from `<2 x i64> addrspace(1)* undef`, addrspace 1)
; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<2 x s64>)
; CHECK: $vgpr0 = COPY [[UV]](s32)
; CHECK: $vgpr1 = COPY [[UV1]](s32)
; CHECK: $vgpr2 = COPY [[UV2]](s32)
; CHECK: $vgpr3 = COPY [[UV3]](s32)
; CHECK: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
; CHECK: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
%val = load <2 x i64>, <2 x i64> addrspace(1)* undef
ret <2 x i64> %val
}
define <3 x i64> @v3i64_func_void() #0 {
; CHECK-LABEL: name: v3i64_func_void
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
; CHECK: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load 8 from `<3 x i64> addrspace(1)* addrspace(4)* undef`, addrspace 4)
; CHECK: [[LOAD1:%[0-9]+]]:_(<3 x s64>) = G_LOAD [[LOAD]](p1) :: (load 24 from %ir.ptr, align 32, addrspace 1)
; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<3 x s64>)
; CHECK: $vgpr0 = COPY [[UV]](s32)
; CHECK: $vgpr1 = COPY [[UV1]](s32)
; CHECK: $vgpr2 = COPY [[UV2]](s32)
; CHECK: $vgpr3 = COPY [[UV3]](s32)
; CHECK: $vgpr4 = COPY [[UV4]](s32)
; CHECK: $vgpr5 = COPY [[UV5]](s32)
; CHECK: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
; CHECK: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5
%ptr = load volatile <3 x i64> addrspace(1)*, <3 x i64> addrspace(1)* addrspace(4)* undef
%val = load <3 x i64>, <3 x i64> addrspace(1)* %ptr
ret <3 x i64> %val
}
define <4 x i64> @v4i64_func_void() #0 {
; CHECK-LABEL: name: v4i64_func_void
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
; CHECK: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load 8 from `<4 x i64> addrspace(1)* addrspace(4)* undef`, addrspace 4)
; CHECK: [[LOAD1:%[0-9]+]]:_(<4 x s64>) = G_LOAD [[LOAD]](p1) :: (load 32 from %ir.ptr, addrspace 1)
; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<4 x s64>)
; CHECK: $vgpr0 = COPY [[UV]](s32)
; CHECK: $vgpr1 = COPY [[UV1]](s32)
; CHECK: $vgpr2 = COPY [[UV2]](s32)
; CHECK: $vgpr3 = COPY [[UV3]](s32)
; CHECK: $vgpr4 = COPY [[UV4]](s32)
; CHECK: $vgpr5 = COPY [[UV5]](s32)
; CHECK: $vgpr6 = COPY [[UV6]](s32)
; CHECK: $vgpr7 = COPY [[UV7]](s32)
; CHECK: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
; CHECK: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
%ptr = load volatile <4 x i64> addrspace(1)*, <4 x i64> addrspace(1)* addrspace(4)* undef
%val = load <4 x i64>, <4 x i64> addrspace(1)* %ptr
ret <4 x i64> %val
}
define <5 x i64> @v5i64_func_void() #0 {
; CHECK-LABEL: name: v5i64_func_void
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
; CHECK: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load 8 from `<5 x i64> addrspace(1)* addrspace(4)* undef`, addrspace 4)
; CHECK: [[LOAD1:%[0-9]+]]:_(<5 x s64>) = G_LOAD [[LOAD]](p1) :: (load 40 from %ir.ptr, align 64, addrspace 1)
; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<5 x s64>)
; CHECK: $vgpr0 = COPY [[UV]](s32)
; CHECK: $vgpr1 = COPY [[UV1]](s32)
; CHECK: $vgpr2 = COPY [[UV2]](s32)
; CHECK: $vgpr3 = COPY [[UV3]](s32)
; CHECK: $vgpr4 = COPY [[UV4]](s32)
; CHECK: $vgpr5 = COPY [[UV5]](s32)
; CHECK: $vgpr6 = COPY [[UV6]](s32)
; CHECK: $vgpr7 = COPY [[UV7]](s32)
; CHECK: $vgpr8 = COPY [[UV8]](s32)
; CHECK: $vgpr9 = COPY [[UV9]](s32)
; CHECK: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
; CHECK: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9
%ptr = load volatile <5 x i64> addrspace(1)*, <5 x i64> addrspace(1)* addrspace(4)* undef
%val = load <5 x i64>, <5 x i64> addrspace(1)* %ptr
ret <5 x i64> %val
}
define <8 x i64> @v8i64_func_void() #0 {
; CHECK-LABEL: name: v8i64_func_void
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
; CHECK: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load 8 from `<8 x i64> addrspace(1)* addrspace(4)* undef`, addrspace 4)
; CHECK: [[LOAD1:%[0-9]+]]:_(<8 x s64>) = G_LOAD [[LOAD]](p1) :: (load 64 from %ir.ptr, addrspace 1)
; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<8 x s64>)
; CHECK: $vgpr0 = COPY [[UV]](s32)
; CHECK: $vgpr1 = COPY [[UV1]](s32)
; CHECK: $vgpr2 = COPY [[UV2]](s32)
; CHECK: $vgpr3 = COPY [[UV3]](s32)
; CHECK: $vgpr4 = COPY [[UV4]](s32)
; CHECK: $vgpr5 = COPY [[UV5]](s32)
; CHECK: $vgpr6 = COPY [[UV6]](s32)
; CHECK: $vgpr7 = COPY [[UV7]](s32)
; CHECK: $vgpr8 = COPY [[UV8]](s32)
; CHECK: $vgpr9 = COPY [[UV9]](s32)
; CHECK: $vgpr10 = COPY [[UV10]](s32)
; CHECK: $vgpr11 = COPY [[UV11]](s32)
; CHECK: $vgpr12 = COPY [[UV12]](s32)
; CHECK: $vgpr13 = COPY [[UV13]](s32)
; CHECK: $vgpr14 = COPY [[UV14]](s32)
; CHECK: $vgpr15 = COPY [[UV15]](s32)
; CHECK: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
; CHECK: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15
%ptr = load volatile <8 x i64> addrspace(1)*, <8 x i64> addrspace(1)* addrspace(4)* undef
%val = load <8 x i64>, <8 x i64> addrspace(1)* %ptr
ret <8 x i64> %val
}
define <16 x i64> @v16i64_func_void() #0 {
; CHECK-LABEL: name: v16i64_func_void
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
; CHECK: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load 8 from `<16 x i64> addrspace(1)* addrspace(4)* undef`, addrspace 4)
; CHECK: [[LOAD1:%[0-9]+]]:_(<16 x s64>) = G_LOAD [[LOAD]](p1) :: (load 128 from %ir.ptr, addrspace 1)
; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32), [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32), [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32), [[UV20:%[0-9]+]]:_(s32), [[UV21:%[0-9]+]]:_(s32), [[UV22:%[0-9]+]]:_(s32), [[UV23:%[0-9]+]]:_(s32), [[UV24:%[0-9]+]]:_(s32), [[UV25:%[0-9]+]]:_(s32), [[UV26:%[0-9]+]]:_(s32), [[UV27:%[0-9]+]]:_(s32), [[UV28:%[0-9]+]]:_(s32), [[UV29:%[0-9]+]]:_(s32), [[UV30:%[0-9]+]]:_(s32), [[UV31:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<16 x s64>)
; CHECK: $vgpr0 = COPY [[UV]](s32)
; CHECK: $vgpr1 = COPY [[UV1]](s32)
; CHECK: $vgpr2 = COPY [[UV2]](s32)
; CHECK: $vgpr3 = COPY [[UV3]](s32)
; CHECK: $vgpr4 = COPY [[UV4]](s32)
; CHECK: $vgpr5 = COPY [[UV5]](s32)
; CHECK: $vgpr6 = COPY [[UV6]](s32)
; CHECK: $vgpr7 = COPY [[UV7]](s32)
; CHECK: $vgpr8 = COPY [[UV8]](s32)
; CHECK: $vgpr9 = COPY [[UV9]](s32)
; CHECK: $vgpr10 = COPY [[UV10]](s32)
; CHECK: $vgpr11 = COPY [[UV11]](s32)
; CHECK: $vgpr12 = COPY [[UV12]](s32)
; CHECK: $vgpr13 = COPY [[UV13]](s32)
; CHECK: $vgpr14 = COPY [[UV14]](s32)
; CHECK: $vgpr15 = COPY [[UV15]](s32)
; CHECK: $vgpr16 = COPY [[UV16]](s32)
; CHECK: $vgpr17 = COPY [[UV17]](s32)
; CHECK: $vgpr18 = COPY [[UV18]](s32)
; CHECK: $vgpr19 = COPY [[UV19]](s32)
; CHECK: $vgpr20 = COPY [[UV20]](s32)
; CHECK: $vgpr21 = COPY [[UV21]](s32)
; CHECK: $vgpr22 = COPY [[UV22]](s32)
; CHECK: $vgpr23 = COPY [[UV23]](s32)
; CHECK: $vgpr24 = COPY [[UV24]](s32)
; CHECK: $vgpr25 = COPY [[UV25]](s32)
; CHECK: $vgpr26 = COPY [[UV26]](s32)
; CHECK: $vgpr27 = COPY [[UV27]](s32)
; CHECK: $vgpr28 = COPY [[UV28]](s32)
; CHECK: $vgpr29 = COPY [[UV29]](s32)
; CHECK: $vgpr30 = COPY [[UV30]](s32)
; CHECK: $vgpr31 = COPY [[UV31]](s32)
; CHECK: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
; CHECK: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $vgpr31
%ptr = load volatile <16 x i64> addrspace(1)*, <16 x i64> addrspace(1)* addrspace(4)* undef
%val = load <16 x i64>, <16 x i64> addrspace(1)* %ptr
ret <16 x i64> %val
}
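
; Vectors with 16-bit (and, further below, 8-bit) elements are unmerged
; to their element type and each element is any-extended to 32 bits.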
define <2 x i16> @v2i16_func_void() #0 {
; CHECK-LABEL: name: v2i16_func_void
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[DEF]](p1) :: (load 4 from `<2 x i16> addrspace(1)* undef`, addrspace 1)
; CHECK: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[LOAD]](<2 x s16>)
; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s16)
; CHECK: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s16)
; CHECK: $vgpr0 = COPY [[ANYEXT]](s32)
; CHECK: $vgpr1 = COPY [[ANYEXT1]](s32)
; CHECK: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
; CHECK: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1
%val = load <2 x i16>, <2 x i16> addrspace(1)* undef
ret <2 x i16> %val
}
define <3 x i16> @v3i16_func_void() #0 {
; CHECK-LABEL: name: v3i16_func_void
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: [[LOAD:%[0-9]+]]:_(<3 x s16>) = G_LOAD [[DEF]](p1) :: (load 6 from `<3 x i16> addrspace(1)* undef`, align 8, addrspace 1)
; CHECK: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[LOAD]](<3 x s16>)
; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s16)
; CHECK: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s16)
; CHECK: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s16)
; CHECK: $vgpr0 = COPY [[ANYEXT]](s32)
; CHECK: $vgpr1 = COPY [[ANYEXT1]](s32)
; CHECK: $vgpr2 = COPY [[ANYEXT2]](s32)
; CHECK: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
; CHECK: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
%val = load <3 x i16>, <3 x i16> addrspace(1)* undef
ret <3 x i16> %val
}
define <4 x i16> @v4i16_func_void() #0 {
; CHECK-LABEL: name: v4i16_func_void
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: [[LOAD:%[0-9]+]]:_(<4 x s16>) = G_LOAD [[DEF]](p1) :: (load 8 from `<4 x i16> addrspace(1)* undef`, addrspace 1)
; CHECK: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[LOAD]](<4 x s16>)
; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s16)
; CHECK: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s16)
; CHECK: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s16)
; CHECK: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s16)
; CHECK: $vgpr0 = COPY [[ANYEXT]](s32)
; CHECK: $vgpr1 = COPY [[ANYEXT1]](s32)
; CHECK: $vgpr2 = COPY [[ANYEXT2]](s32)
; CHECK: $vgpr3 = COPY [[ANYEXT3]](s32)
; CHECK: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
; CHECK: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
%val = load <4 x i16>, <4 x i16> addrspace(1)* undef
ret <4 x i16> %val
}
define <4 x half> @v4f16_func_void() #0 {
; CHECK-LABEL: name: v4f16_func_void
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: [[LOAD:%[0-9]+]]:_(<4 x s16>) = G_LOAD [[DEF]](p1) :: (load 8 from `<4 x half> addrspace(1)* undef`, addrspace 1)
; CHECK: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[LOAD]](<4 x s16>)
; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s16)
; CHECK: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s16)
; CHECK: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s16)
; CHECK: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s16)
; CHECK: $vgpr0 = COPY [[ANYEXT]](s32)
; CHECK: $vgpr1 = COPY [[ANYEXT1]](s32)
; CHECK: $vgpr2 = COPY [[ANYEXT2]](s32)
; CHECK: $vgpr3 = COPY [[ANYEXT3]](s32)
; CHECK: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
; CHECK: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
%val = load <4 x half>, <4 x half> addrspace(1)* undef
ret <4 x half> %val
}
define <5 x i16> @v5i16_func_void() #0 {
; CHECK-LABEL: name: v5i16_func_void
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
; CHECK: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load 8 from `<5 x i16> addrspace(1)* addrspace(4)* undef`, addrspace 4)
; CHECK: [[LOAD1:%[0-9]+]]:_(<5 x s16>) = G_LOAD [[LOAD]](p1) :: (load 10 from %ir.ptr, align 16, addrspace 1)
; CHECK: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16), [[UV4:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[LOAD1]](<5 x s16>)
; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s16)
; CHECK: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s16)
; CHECK: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s16)
; CHECK: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s16)
; CHECK: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[UV4]](s16)
; CHECK: $vgpr0 = COPY [[ANYEXT]](s32)
; CHECK: $vgpr1 = COPY [[ANYEXT1]](s32)
; CHECK: $vgpr2 = COPY [[ANYEXT2]](s32)
; CHECK: $vgpr3 = COPY [[ANYEXT3]](s32)
; CHECK: $vgpr4 = COPY [[ANYEXT4]](s32)
; CHECK: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
; CHECK: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4
%ptr = load volatile <5 x i16> addrspace(1)*, <5 x i16> addrspace(1)* addrspace(4)* undef
%val = load <5 x i16>, <5 x i16> addrspace(1)* %ptr
ret <5 x i16> %val
}
define <8 x i16> @v8i16_func_void() #0 {
; CHECK-LABEL: name: v8i16_func_void
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
; CHECK: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load 8 from `<8 x i16> addrspace(1)* addrspace(4)* undef`, addrspace 4)
; CHECK: [[LOAD1:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[LOAD]](p1) :: (load 16 from %ir.ptr, addrspace 1)
; CHECK: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16), [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[LOAD1]](<8 x s16>)
; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s16)
; CHECK: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s16)
; CHECK: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s16)
; CHECK: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s16)
; CHECK: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[UV4]](s16)
; CHECK: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[UV5]](s16)
; CHECK: [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[UV6]](s16)
; CHECK: [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[UV7]](s16)
; CHECK: $vgpr0 = COPY [[ANYEXT]](s32)
; CHECK: $vgpr1 = COPY [[ANYEXT1]](s32)
; CHECK: $vgpr2 = COPY [[ANYEXT2]](s32)
; CHECK: $vgpr3 = COPY [[ANYEXT3]](s32)
; CHECK: $vgpr4 = COPY [[ANYEXT4]](s32)
; CHECK: $vgpr5 = COPY [[ANYEXT5]](s32)
; CHECK: $vgpr6 = COPY [[ANYEXT6]](s32)
; CHECK: $vgpr7 = COPY [[ANYEXT7]](s32)
; CHECK: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
; CHECK: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
%ptr = load volatile <8 x i16> addrspace(1)*, <8 x i16> addrspace(1)* addrspace(4)* undef
%val = load <8 x i16>, <8 x i16> addrspace(1)* %ptr
ret <8 x i16> %val
}
define <16 x i16> @v16i16_func_void() #0 {
; CHECK-LABEL: name: v16i16_func_void
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
; CHECK: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load 8 from `<16 x i16> addrspace(1)* addrspace(4)* undef`, addrspace 4)
; CHECK: [[LOAD1:%[0-9]+]]:_(<16 x s16>) = G_LOAD [[LOAD]](p1) :: (load 32 from %ir.ptr, addrspace 1)
; CHECK: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16), [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16), [[UV8:%[0-9]+]]:_(s16), [[UV9:%[0-9]+]]:_(s16), [[UV10:%[0-9]+]]:_(s16), [[UV11:%[0-9]+]]:_(s16), [[UV12:%[0-9]+]]:_(s16), [[UV13:%[0-9]+]]:_(s16), [[UV14:%[0-9]+]]:_(s16), [[UV15:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[LOAD1]](<16 x s16>)
; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s16)
; CHECK: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s16)
; CHECK: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s16)
; CHECK: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s16)
; CHECK: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[UV4]](s16)
; CHECK: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[UV5]](s16)
; CHECK: [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[UV6]](s16)
; CHECK: [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[UV7]](s16)
; CHECK: [[ANYEXT8:%[0-9]+]]:_(s32) = G_ANYEXT [[UV8]](s16)
; CHECK: [[ANYEXT9:%[0-9]+]]:_(s32) = G_ANYEXT [[UV9]](s16)
; CHECK: [[ANYEXT10:%[0-9]+]]:_(s32) = G_ANYEXT [[UV10]](s16)
; CHECK: [[ANYEXT11:%[0-9]+]]:_(s32) = G_ANYEXT [[UV11]](s16)
; CHECK: [[ANYEXT12:%[0-9]+]]:_(s32) = G_ANYEXT [[UV12]](s16)
; CHECK: [[ANYEXT13:%[0-9]+]]:_(s32) = G_ANYEXT [[UV13]](s16)
; CHECK: [[ANYEXT14:%[0-9]+]]:_(s32) = G_ANYEXT [[UV14]](s16)
; CHECK: [[ANYEXT15:%[0-9]+]]:_(s32) = G_ANYEXT [[UV15]](s16)
; CHECK: $vgpr0 = COPY [[ANYEXT]](s32)
; CHECK: $vgpr1 = COPY [[ANYEXT1]](s32)
; CHECK: $vgpr2 = COPY [[ANYEXT2]](s32)
; CHECK: $vgpr3 = COPY [[ANYEXT3]](s32)
; CHECK: $vgpr4 = COPY [[ANYEXT4]](s32)
; CHECK: $vgpr5 = COPY [[ANYEXT5]](s32)
; CHECK: $vgpr6 = COPY [[ANYEXT6]](s32)
; CHECK: $vgpr7 = COPY [[ANYEXT7]](s32)
; CHECK: $vgpr8 = COPY [[ANYEXT8]](s32)
; CHECK: $vgpr9 = COPY [[ANYEXT9]](s32)
; CHECK: $vgpr10 = COPY [[ANYEXT10]](s32)
; CHECK: $vgpr11 = COPY [[ANYEXT11]](s32)
; CHECK: $vgpr12 = COPY [[ANYEXT12]](s32)
; CHECK: $vgpr13 = COPY [[ANYEXT13]](s32)
; CHECK: $vgpr14 = COPY [[ANYEXT14]](s32)
; CHECK: $vgpr15 = COPY [[ANYEXT15]](s32)
; CHECK: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
; CHECK: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15
%ptr = load volatile <16 x i16> addrspace(1)*, <16 x i16> addrspace(1)* addrspace(4)* undef
%val = load <16 x i16>, <16 x i16> addrspace(1)* %ptr
ret <16 x i16> %val
}
define <16 x i8> @v16i8_func_void() #0 {
; CHECK-LABEL: name: v16i8_func_void
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
; CHECK: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load 8 from `<16 x i8> addrspace(1)* addrspace(4)* undef`, addrspace 4)
; CHECK: [[LOAD1:%[0-9]+]]:_(<16 x s8>) = G_LOAD [[LOAD]](p1) :: (load 16 from %ir.ptr, addrspace 1)
; CHECK: [[UV:%[0-9]+]]:_(s8), [[UV1:%[0-9]+]]:_(s8), [[UV2:%[0-9]+]]:_(s8), [[UV3:%[0-9]+]]:_(s8), [[UV4:%[0-9]+]]:_(s8), [[UV5:%[0-9]+]]:_(s8), [[UV6:%[0-9]+]]:_(s8), [[UV7:%[0-9]+]]:_(s8), [[UV8:%[0-9]+]]:_(s8), [[UV9:%[0-9]+]]:_(s8), [[UV10:%[0-9]+]]:_(s8), [[UV11:%[0-9]+]]:_(s8), [[UV12:%[0-9]+]]:_(s8), [[UV13:%[0-9]+]]:_(s8), [[UV14:%[0-9]+]]:_(s8), [[UV15:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[LOAD1]](<16 x s8>)
; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s8)
; CHECK: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s8)
; CHECK: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s8)
; CHECK: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s8)
; CHECK: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[UV4]](s8)
; CHECK: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[UV5]](s8)
; CHECK: [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[UV6]](s8)
; CHECK: [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[UV7]](s8)
; CHECK: [[ANYEXT8:%[0-9]+]]:_(s32) = G_ANYEXT [[UV8]](s8)
; CHECK: [[ANYEXT9:%[0-9]+]]:_(s32) = G_ANYEXT [[UV9]](s8)
; CHECK: [[ANYEXT10:%[0-9]+]]:_(s32) = G_ANYEXT [[UV10]](s8)
; CHECK: [[ANYEXT11:%[0-9]+]]:_(s32) = G_ANYEXT [[UV11]](s8)
; CHECK: [[ANYEXT12:%[0-9]+]]:_(s32) = G_ANYEXT [[UV12]](s8)
; CHECK: [[ANYEXT13:%[0-9]+]]:_(s32) = G_ANYEXT [[UV13]](s8)
; CHECK: [[ANYEXT14:%[0-9]+]]:_(s32) = G_ANYEXT [[UV14]](s8)
; CHECK: [[ANYEXT15:%[0-9]+]]:_(s32) = G_ANYEXT [[UV15]](s8)
; CHECK: $vgpr0 = COPY [[ANYEXT]](s32)
; CHECK: $vgpr1 = COPY [[ANYEXT1]](s32)
; CHECK: $vgpr2 = COPY [[ANYEXT2]](s32)
; CHECK: $vgpr3 = COPY [[ANYEXT3]](s32)
; CHECK: $vgpr4 = COPY [[ANYEXT4]](s32)
; CHECK: $vgpr5 = COPY [[ANYEXT5]](s32)
; CHECK: $vgpr6 = COPY [[ANYEXT6]](s32)
; CHECK: $vgpr7 = COPY [[ANYEXT7]](s32)
; CHECK: $vgpr8 = COPY [[ANYEXT8]](s32)
; CHECK: $vgpr9 = COPY [[ANYEXT9]](s32)
; CHECK: $vgpr10 = COPY [[ANYEXT10]](s32)
; CHECK: $vgpr11 = COPY [[ANYEXT11]](s32)
; CHECK: $vgpr12 = COPY [[ANYEXT12]](s32)
; CHECK: $vgpr13 = COPY [[ANYEXT13]](s32)
; CHECK: $vgpr14 = COPY [[ANYEXT14]](s32)
; CHECK: $vgpr15 = COPY [[ANYEXT15]](s32)
; CHECK: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
; CHECK: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15
%ptr = load volatile <16 x i8> addrspace(1)*, <16 x i8> addrspace(1)* addrspace(4)* undef
%val = load <16 x i8>, <16 x i8> addrspace(1)* %ptr
ret <16 x i8> %val
}
define <4 x i8> @v4i8_func_void() #0 {
; CHECK-LABEL: name: v4i8_func_void
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
; CHECK: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load 8 from `<4 x i8> addrspace(1)* addrspace(4)* undef`, addrspace 4)
; CHECK: [[LOAD1:%[0-9]+]]:_(<4 x s8>) = G_LOAD [[LOAD]](p1) :: (load 4 from %ir.ptr, addrspace 1)
; CHECK: [[UV:%[0-9]+]]:_(s8), [[UV1:%[0-9]+]]:_(s8), [[UV2:%[0-9]+]]:_(s8), [[UV3:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[LOAD1]](<4 x s8>)
; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s8)
; CHECK: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s8)
; CHECK: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s8)
; CHECK: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s8)
; CHECK: $vgpr0 = COPY [[ANYEXT]](s32)
; CHECK: $vgpr1 = COPY [[ANYEXT1]](s32)
; CHECK: $vgpr2 = COPY [[ANYEXT2]](s32)
; CHECK: $vgpr3 = COPY [[ANYEXT3]](s32)
; CHECK: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
; CHECK: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
%ptr = load volatile <4 x i8> addrspace(1)*, <4 x i8> addrspace(1)* addrspace(4)* undef
%val = load <4 x i8>, <4 x i8> addrspace(1)* %ptr
ret <4 x i8> %val
}
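
; Struct returns load each element separately; the i32 member is loaded
; at byte offset 4 via G_GEP. As with i8, the checks stop before the
; return copies, suggesting a fallback.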
define {i8, i32} @struct_i8_i32_func_void() #0 {
; CHECK-LABEL: name: struct_i8_i32_func_void
; CHECK: bb.0:
; CHECK: successors: %bb.1(0x80000000)
; CHECK: liveins: $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: bb.1 (%ir-block.0):
; CHECK: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p1) :: (load 1 from `{ i8, i32 } addrspace(1)* undef`, align 4, addrspace 1)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; CHECK: [[GEP:%[0-9]+]]:_(p1) = G_GEP [[DEF]], [[C]](s64)
; CHECK: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[GEP]](p1) :: (load 4 from `{ i8, i32 } addrspace(1)* undef` + 4, addrspace 1)
%val = load { i8, i32 }, { i8, i32 } addrspace(1)* undef
ret { i8, i32 } %val
}
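
; With an sret argument, the result is stored through the incoming
; private (addrspace 5) pointer in $vgpr0 rather than returned in
; registers.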
define void @void_func_sret_struct_i8_i32({ i8, i32 } addrspace(5)* sret %arg0) #0 {
; CHECK-LABEL: name: void_func_sret_struct_i8_i32
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: [[DEF1:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
; CHECK: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p1) :: (volatile load 1 from `i8 addrspace(1)* undef`, addrspace 1)
; CHECK: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF1]](p1) :: (volatile load 4 from `i32 addrspace(1)* undef`, addrspace 1)
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; CHECK: [[GEP:%[0-9]+]]:_(p5) = G_GEP [[COPY]], [[C]](s32)
; CHECK: G_STORE [[LOAD]](s8), [[COPY]](p5) :: (store 1 into %ir.gep01, addrspace 5)
; CHECK: G_STORE [[LOAD1]](s32), [[GEP]](p5) :: (store 4 into %ir.gep1, addrspace 5)
; CHECK: [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
; CHECK: S_SETPC_B64_return [[COPY2]]
%val0 = load volatile i8, i8 addrspace(1)* undef
%val1 = load volatile i32, i32 addrspace(1)* undef
%gep0 = getelementptr inbounds { i8, i32 }, { i8, i32 } addrspace(5)* %arg0, i32 0, i32 0
%gep1 = getelementptr inbounds { i8, i32 }, { i8, i32 } addrspace(5)* %arg0, i32 0, i32 1
store i8 %val0, i8 addrspace(5)* %gep0
store i32 %val1, i32 addrspace(5)* %gep1
ret void
}
; FIXME: Should be able to fold offsets in all of these pre-gfx9. Call
; lowering introduces an extra CopyToReg/CopyFromReg that obscures the
; inserted AssertZext. Not using it introduces the spills.
define <33 x i32> @v33i32_func_void() #0 {
; CHECK-LABEL: name: v33i32_func_void
; CHECK: bb.0:
; CHECK: successors: %bb.1(0x80000000)
; CHECK: liveins: $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
; CHECK: bb.1 (%ir-block.0):
; CHECK: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load 8 from `<33 x i32> addrspace(1)* addrspace(4)* undef`, addrspace 4)
; CHECK: [[LOAD1:%[0-9]+]]:_(<33 x s32>) = G_LOAD [[LOAD]](p1) :: (load 132 from %ir.ptr, align 256, addrspace 1)
; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32), [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32), [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32), [[UV20:%[0-9]+]]:_(s32), [[UV21:%[0-9]+]]:_(s32), [[UV22:%[0-9]+]]:_(s32), [[UV23:%[0-9]+]]:_(s32), [[UV24:%[0-9]+]]:_(s32), [[UV25:%[0-9]+]]:_(s32), [[UV26:%[0-9]+]]:_(s32), [[UV27:%[0-9]+]]:_(s32), [[UV28:%[0-9]+]]:_(s32), [[UV29:%[0-9]+]]:_(s32), [[UV30:%[0-9]+]]:_(s32), [[UV31:%[0-9]+]]:_(s32), [[UV32:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<33 x s32>)
%ptr = load volatile <33 x i32> addrspace(1)*, <33 x i32> addrspace(1)* addrspace(4)* undef
%val = load <33 x i32>, <33 x i32> addrspace(1)* %ptr
ret <33 x i32> %val
}
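
; The <32 x i32> member and the trailing i32 are loaded separately, the
; i32 at byte offset 128 via G_GEP.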
define { <32 x i32>, i32 } @struct_v32i32_i32_func_void() #0 {
; CHECK-LABEL: name: struct_v32i32_i32_func_void
; CHECK: bb.0:
; CHECK: successors: %bb.1(0x80000000)
; CHECK: liveins: $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
; CHECK: bb.1 (%ir-block.0):
; CHECK: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load 8 from `{ <32 x i32>, i32 } addrspace(1)* addrspace(4)* undef`, addrspace 4)
; CHECK: [[LOAD1:%[0-9]+]]:_(<32 x s32>) = G_LOAD [[LOAD]](p1) :: (load 128 from %ir.ptr, addrspace 1)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 128
; CHECK: [[GEP:%[0-9]+]]:_(p1) = G_GEP [[LOAD]], [[C]](s64)
; CHECK: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[GEP]](p1) :: (load 4 from %ir.ptr + 128, align 128, addrspace 1)
; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32), [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32), [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32), [[UV20:%[0-9]+]]:_(s32), [[UV21:%[0-9]+]]:_(s32), [[UV22:%[0-9]+]]:_(s32), [[UV23:%[0-9]+]]:_(s32), [[UV24:%[0-9]+]]:_(s32), [[UV25:%[0-9]+]]:_(s32), [[UV26:%[0-9]+]]:_(s32), [[UV27:%[0-9]+]]:_(s32), [[UV28:%[0-9]+]]:_(s32), [[UV29:%[0-9]+]]:_(s32), [[UV30:%[0-9]+]]:_(s32), [[UV31:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<32 x s32>)
%ptr = load volatile { <32 x i32>, i32 } addrspace(1)*, { <32 x i32>, i32 } addrspace(1)* addrspace(4)* undef
%val = load { <32 x i32>, i32 }, { <32 x i32>, i32 } addrspace(1)* %ptr
ret { <32 x i32>, i32 } %val
}
define { i32, <32 x i32> } @struct_i32_v32i32_func_void() #0 {
; CHECK-LABEL: name: struct_i32_v32i32_func_void
; CHECK: bb.0:
; CHECK: successors: %bb.1(0x80000000)
; CHECK: liveins: $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
; CHECK: bb.1 (%ir-block.0):
; CHECK: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load 8 from `{ i32, <32 x i32> } addrspace(1)* addrspace(4)* undef`, addrspace 4)
; CHECK: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[LOAD]](p1) :: (load 4 from %ir.ptr, align 128, addrspace 1)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 128
; CHECK: [[GEP:%[0-9]+]]:_(p1) = G_GEP [[LOAD]], [[C]](s64)
; CHECK: [[LOAD2:%[0-9]+]]:_(<32 x s32>) = G_LOAD [[GEP]](p1) :: (load 128 from %ir.ptr + 128, addrspace 1)
; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32), [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32), [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32), [[UV20:%[0-9]+]]:_(s32), [[UV21:%[0-9]+]]:_(s32), [[UV22:%[0-9]+]]:_(s32), [[UV23:%[0-9]+]]:_(s32), [[UV24:%[0-9]+]]:_(s32), [[UV25:%[0-9]+]]:_(s32), [[UV26:%[0-9]+]]:_(s32), [[UV27:%[0-9]+]]:_(s32), [[UV28:%[0-9]+]]:_(s32), [[UV29:%[0-9]+]]:_(s32), [[UV30:%[0-9]+]]:_(s32), [[UV31:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD2]](<32 x s32>)
%ptr = load volatile { i32, <32 x i32> } addrspace(1)*, { i32, <32 x i32> } addrspace(1)* addrspace(4)* undef
%val = load { i32, <32 x i32> }, { i32, <32 x i32> } addrspace(1)* %ptr
ret { i32, <32 x i32> } %val
}
; Make sure the last struct component is returned in v3, not v4.
define { <3 x i32>, i32 } @v3i32_struct_func_void_wasted_reg() #0 {
; CHECK-LABEL: name: v3i32_struct_func_void_wasted_reg
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[DEF:%[0-9]+]]:_(p3) = G_IMPLICIT_DEF
; CHECK: [[DEF1:%[0-9]+]]:_(<3 x s32>) = G_IMPLICIT_DEF
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
; CHECK: [[DEF2:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p3) :: (volatile load 4 from `i32 addrspace(3)* undef`, addrspace 3)
; CHECK: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p3) :: (volatile load 4 from `i32 addrspace(3)* undef`, addrspace 3)
; CHECK: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p3) :: (volatile load 4 from `i32 addrspace(3)* undef`, addrspace 3)
; CHECK: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p3) :: (volatile load 4 from `i32 addrspace(3)* undef`, addrspace 3)
; CHECK: [[IVEC:%[0-9]+]]:_(<3 x s32>) = G_INSERT_VECTOR_ELT [[DEF1]], [[LOAD]](s32), [[C]](s32)
; CHECK: [[IVEC1:%[0-9]+]]:_(<3 x s32>) = G_INSERT_VECTOR_ELT [[IVEC]], [[LOAD1]](s32), [[C1]](s32)
; CHECK: [[IVEC2:%[0-9]+]]:_(<3 x s32>) = G_INSERT_VECTOR_ELT [[IVEC1]], [[LOAD2]](s32), [[C2]](s32)
; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[IVEC2]](<3 x s32>)
; CHECK: $vgpr0 = COPY [[UV]](s32)
; CHECK: $vgpr1 = COPY [[UV1]](s32)
; CHECK: $vgpr2 = COPY [[UV2]](s32)
; CHECK: $vgpr3 = COPY [[LOAD3]](s32)
; CHECK: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
; CHECK: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
%load0 = load volatile i32, i32 addrspace(3)* undef
%load1 = load volatile i32, i32 addrspace(3)* undef
%load2 = load volatile i32, i32 addrspace(3)* undef
%load3 = load volatile i32, i32 addrspace(3)* undef
%insert.0 = insertelement <3 x i32> undef, i32 %load0, i32 0
%insert.1 = insertelement <3 x i32> %insert.0, i32 %load1, i32 1
%insert.2 = insertelement <3 x i32> %insert.1, i32 %load2, i32 2
%insert.3 = insertvalue { <3 x i32>, i32 } undef, <3 x i32> %insert.2, 0
%insert.4 = insertvalue { <3 x i32>, i32 } %insert.3, i32 %load3, 1
ret { <3 x i32>, i32 } %insert.4
}
define { <3 x float>, i32 } @v3f32_struct_func_void_wasted_reg() #0 {
; CHECK-LABEL: name: v3f32_struct_func_void_wasted_reg
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[DEF:%[0-9]+]]:_(p3) = G_IMPLICIT_DEF
; CHECK: [[DEF1:%[0-9]+]]:_(p3) = G_IMPLICIT_DEF
; CHECK: [[DEF2:%[0-9]+]]:_(<3 x s32>) = G_IMPLICIT_DEF
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
; CHECK: [[DEF3:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p3) :: (volatile load 4 from `float addrspace(3)* undef`, addrspace 3)
; CHECK: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p3) :: (volatile load 4 from `float addrspace(3)* undef`, addrspace 3)
; CHECK: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p3) :: (volatile load 4 from `float addrspace(3)* undef`, addrspace 3)
; CHECK: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[DEF1]](p3) :: (volatile load 4 from `i32 addrspace(3)* undef`, addrspace 3)
; CHECK: [[IVEC:%[0-9]+]]:_(<3 x s32>) = G_INSERT_VECTOR_ELT [[DEF2]], [[LOAD]](s32), [[C]](s32)
; CHECK: [[IVEC1:%[0-9]+]]:_(<3 x s32>) = G_INSERT_VECTOR_ELT [[IVEC]], [[LOAD1]](s32), [[C1]](s32)
; CHECK: [[IVEC2:%[0-9]+]]:_(<3 x s32>) = G_INSERT_VECTOR_ELT [[IVEC1]], [[LOAD2]](s32), [[C2]](s32)
; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[IVEC2]](<3 x s32>)
; CHECK: $vgpr0 = COPY [[UV]](s32)
; CHECK: $vgpr1 = COPY [[UV1]](s32)
; CHECK: $vgpr2 = COPY [[UV2]](s32)
; CHECK: $vgpr3 = COPY [[LOAD3]](s32)
; CHECK: [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
; CHECK: S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
%load0 = load volatile float, float addrspace(3)* undef
%load1 = load volatile float, float addrspace(3)* undef
%load2 = load volatile float, float addrspace(3)* undef
%load3 = load volatile i32, i32 addrspace(3)* undef
%insert.0 = insertelement <3 x float> undef, float %load0, i32 0
%insert.1 = insertelement <3 x float> %insert.0, float %load1, i32 1
%insert.2 = insertelement <3 x float> %insert.1, float %load2, i32 2
%insert.3 = insertvalue { <3 x float>, i32 } undef, <3 x float> %insert.2, 0
%insert.4 = insertvalue { <3 x float>, i32 } %insert.3, i32 %load3, 1
ret { <3 x float>, i32 } %insert.4
}
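
; Exercises known bits on the incoming sret stack pointer: it is
; converted with ptrtoint and shifted right by 16, 17, and 18 bits.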
define void @void_func_sret_max_known_zero_bits(i8 addrspace(5)* sret %arg0) #0 {
; CHECK-LABEL: name: void_func_sret_max_known_zero_bits
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $vgpr0, $sgpr30_sgpr31
; CHECK: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 17
; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 18
; CHECK: [[DEF:%[0-9]+]]:_(p3) = G_IMPLICIT_DEF
; CHECK: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[COPY]](p5)
; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[PTRTOINT]], [[C]](s32)
; CHECK: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[PTRTOINT]], [[C1]](s32)
; CHECK: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[PTRTOINT]], [[C2]](s32)
; CHECK: G_STORE [[LSHR]](s32), [[DEF]](p3) :: (volatile store 4 into `i32 addrspace(3)* undef`, addrspace 3)
; CHECK: G_STORE [[LSHR1]](s32), [[DEF]](p3) :: (volatile store 4 into `i32 addrspace(3)* undef`, addrspace 3)
; CHECK: G_STORE [[LSHR2]](s32), [[DEF]](p3) :: (volatile store 4 into `i32 addrspace(3)* undef`, addrspace 3)
; CHECK: [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
; CHECK: S_SETPC_B64_return [[COPY2]]
%arg0.int = ptrtoint i8 addrspace(5)* %arg0 to i32
%lshr0 = lshr i32 %arg0.int, 16
%lshr1 = lshr i32 %arg0.int, 17
%lshr2 = lshr i32 %arg0.int, 18
store volatile i32 %lshr0, i32 addrspace(3)* undef
store volatile i32 %lshr1, i32 addrspace(3)* undef
store volatile i32 %lshr2, i32 addrspace(3)* undef
ret void
}
attributes #0 = { nounwind }