; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 -global-isel=1 < %s | FileCheck -check-prefixes=GFX11,GFX11-GISEL %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 -global-isel=0 < %s | FileCheck -check-prefixes=GFX11,GFX11-SDAG %s

declare i32 @llvm.amdgcn.s.quadmask.i32(i32)
declare i64 @llvm.amdgcn.s.quadmask.i64(i64)

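;; For reference: s_quadmask_b32/_b64 OR together each group of four input bits,
;; so output bit i of the result is set iff input nibble i is non-zero. As a
;; worked example, u0x85003092 (test_quadmask_constant_i32 below) has non-zero
;; nibbles 0, 1, 3, 6 and 7 counting from bit 0, which folds to 0b11001011 = 0xcb.
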
define i32 @test_quadmask_constant_zero_i32() {
; GFX11-LABEL: test_quadmask_constant_zero_i32:
; GFX11: ; %bb.0: ; %entry
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_mov_b32_e32 v0, 0
; GFX11-NEXT: s_setpc_b64 s[30:31]
entry:
%qm = call i32 @llvm.amdgcn.s.quadmask.i32(i32 0)
ret i32 %qm
}

define i32 @test_quadmask_constant_neg_one_i32() {
; GFX11-LABEL: test_quadmask_constant_neg_one_i32:
; GFX11: ; %bb.0: ; %entry
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_mov_b32_e32 v0, 0xff
; GFX11-NEXT: s_setpc_b64 s[30:31]
entry:
%qm = call i32 @llvm.amdgcn.s.quadmask.i32(i32 -1)
ret i32 %qm
}

define i32 @test_quadmask_constant_undef_i32() {
; GFX11-LABEL: test_quadmask_constant_undef_i32:
; GFX11: ; %bb.0: ; %entry
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_quadmask_b32 s0, s0
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
entry:
%qm = call i32 @llvm.amdgcn.s.quadmask.i32(i32 undef)
ret i32 %qm
}

define i32 @test_quadmask_constant_poison_i32() {
; GFX11-LABEL: test_quadmask_constant_poison_i32:
; GFX11: ; %bb.0: ; %entry
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_quadmask_b32 s0, s0
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
entry:
%qm = call i32 @llvm.amdgcn.s.quadmask.i32(i32 poison)
ret i32 %qm
}

define i32 @test_quadmask_constant_i32() {
; GFX11-LABEL: test_quadmask_constant_i32:
; GFX11: ; %bb.0: ; %entry
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_mov_b32_e32 v0, 0xcb
; GFX11-NEXT: s_setpc_b64 s[30:31]
entry:
%qm = call i32 @llvm.amdgcn.s.quadmask.i32(i32 u0x85003092)
ret i32 %qm
}

define amdgpu_cs void @test_quadmask_sgpr_i32(i32 inreg %mask, ptr addrspace(1) %out) {
; GFX11-LABEL: test_quadmask_sgpr_i32:
; GFX11: ; %bb.0: ; %entry
; GFX11-NEXT: s_quadmask_b32 s0, s0
; GFX11-NEXT: v_mov_b32_e32 v2, s0
; GFX11-NEXT: global_store_b32 v[0:1], v2, off
; GFX11-NEXT: s_endpgm
entry:
%qm = call i32 @llvm.amdgcn.s.quadmask.i32(i32 %mask)
store i32 %qm, ptr addrspace(1) %out
ret void
}

define i32 @test_quadmask_vgpr_i32(i32 %mask) {
; GFX11-LABEL: test_quadmask_vgpr_i32:
; GFX11: ; %bb.0: ; %entry
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_readfirstlane_b32 s0, v0
; GFX11-NEXT: s_quadmask_b32 s0, s0
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
entry:
%qm = call i32 @llvm.amdgcn.s.quadmask.i32(i32 %mask)
ret i32 %qm
}

define i64 @test_quadmask_constant_i64() {
; GFX11-LABEL: test_quadmask_constant_i64:
; GFX11: ; %bb.0: ; %entry
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_dual_mov_b32 v0, 0xe3e6 :: v_dual_mov_b32 v1, 0
; GFX11-NEXT: s_setpc_b64 s[30:31]
entry:
%qm = call i64 @llvm.amdgcn.s.quadmask.i64(i64 u0x67D000FC85F00A90)
ret i64 %qm
}

define i64 @test_quadmask_constant_zero_i64() {
; GFX11-LABEL: test_quadmask_constant_zero_i64:
; GFX11: ; %bb.0: ; %entry
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0
; GFX11-NEXT: s_setpc_b64 s[30:31]
entry:
%qm = call i64 @llvm.amdgcn.s.quadmask.i64(i64 0)
ret i64 %qm
}

define i64 @test_quadmask_constant_neg_one_i64() {
; GFX11-LABEL: test_quadmask_constant_neg_one_i64:
; GFX11: ; %bb.0: ; %entry
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_dual_mov_b32 v0, 0xffff :: v_dual_mov_b32 v1, 0
; GFX11-NEXT: s_setpc_b64 s[30:31]
entry:
%qm = call i64 @llvm.amdgcn.s.quadmask.i64(i64 -1)
ret i64 %qm
}

define i64 @test_quadmask_constant_undef_i64() {
; GFX11-LABEL: test_quadmask_constant_undef_i64:
; GFX11: ; %bb.0: ; %entry
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_quadmask_b64 s[0:1], s[0:1]
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
entry:
%qm = call i64 @llvm.amdgcn.s.quadmask.i64(i64 undef)
ret i64 %qm
}

define i64 @test_quadmask_constant_poison_i64() {
; GFX11-LABEL: test_quadmask_constant_poison_i64:
; GFX11: ; %bb.0: ; %entry
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_quadmask_b64 s[0:1], s[0:1]
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
entry:
%qm = call i64 @llvm.amdgcn.s.quadmask.i64(i64 poison)
ret i64 %qm
}

define amdgpu_cs void @test_quadmask_sgpr_i64(i64 inreg %mask, ptr addrspace(1) %out) {
; GFX11-LABEL: test_quadmask_sgpr_i64:
; GFX11: ; %bb.0: ; %entry
; GFX11-NEXT: s_quadmask_b64 s[0:1], s[0:1]
; GFX11-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
; GFX11-NEXT: global_store_b64 v[0:1], v[2:3], off
; GFX11-NEXT: s_endpgm
entry:
%qm = call i64 @llvm.amdgcn.s.quadmask.i64(i64 %mask)
store i64 %qm, ptr addrspace(1) %out
ret void
}

define i64 @test_quadmask_vgpr_i64(i64 %mask) {
; GFX11-LABEL: test_quadmask_vgpr_i64:
; GFX11: ; %bb.0: ; %entry
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_readfirstlane_b32 s0, v0
; GFX11-NEXT: v_readfirstlane_b32 s1, v1
; GFX11-NEXT: s_quadmask_b64 s[0:1], s[0:1]
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
entry:
%qm = call i64 @llvm.amdgcn.s.quadmask.i64(i64 %mask)
ret i64 %qm
}

;; Ensure that the AND/ICMP pair cannot be fused into a single AND (with the
;; select reading SCC directly), because s_quadmask_b32 implicitly defines SCC.
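;; Illustrative sketch (not expected output): with nothing clobbering SCC in
;; between, the compare could simply reuse the SCC result of the AND:
;;   s_and_b32     s0, s0, 1    ; also sets SCC = (result != 0)
;;   s_cselect_b32 s0, 0, 1     ; select on SCC directly, no s_cmp_eq_u32
;; Since s_quadmask_b32 is scheduled between the AND and the compare and also
;; writes SCC, the explicit s_cmp_eq_u32 in the CHECK lines must be preserved.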
define amdgpu_kernel void @test_scc_quadmask_32(i32 %val0, i32 %val1, ptr addrspace(1) %ptr) {
; GFX11-GISEL-LABEL: test_scc_quadmask_32:
; GFX11-GISEL: ; %bb.0:
; GFX11-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-GISEL-NEXT: v_mov_b32_e32 v0, 0
; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-GISEL-NEXT: s_and_b32 s0, s0, 1
; GFX11-GISEL-NEXT: s_quadmask_b32 s1, s1
; GFX11-GISEL-NEXT: s_cmp_eq_u32 s0, 0
; GFX11-GISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v3, s1
; GFX11-GISEL-NEXT: s_cselect_b32 s0, 1, 0
; GFX11-GISEL-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v4, s0
; GFX11-GISEL-NEXT: global_store_b32 v2, v3, s[2:3]
; GFX11-GISEL-NEXT: global_store_b32 v[0:1], v4, off
; GFX11-GISEL-NEXT: s_endpgm
;
; GFX11-SDAG-LABEL: test_scc_quadmask_32:
; GFX11-SDAG: ; %bb.0:
; GFX11-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-SDAG-NEXT: v_mov_b32_e32 v2, 0
; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-SDAG-NEXT: s_and_b32 s0, s0, 1
; GFX11-SDAG-NEXT: s_quadmask_b32 s1, s1
; GFX11-SDAG-NEXT: s_cmp_eq_u32 s0, 0
; GFX11-SDAG-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v3, s1
; GFX11-SDAG-NEXT: s_cselect_b32 s0, -1, 0
; GFX11-SDAG-NEXT: v_mov_b32_e32 v1, 0
; GFX11-SDAG-NEXT: v_cndmask_b32_e64 v4, 0, 1, s0
; GFX11-SDAG-NEXT: global_store_b32 v2, v3, s[2:3]
; GFX11-SDAG-NEXT: global_store_b32 v[0:1], v4, off
; GFX11-SDAG-NEXT: s_endpgm
%and = and i32 %val0, 1
%result = call i32 @llvm.amdgcn.s.quadmask.i32(i32 %val1) nounwind readnone
store i32 %result, ptr addrspace(1) %ptr
%cmp = icmp eq i32 %and, 0
%sel = select i1 %cmp, i32 1, i32 0
store i32 %sel, ptr addrspace(1) null, align 4
ret void
}

;; Ensure that the AND/ICMP pair cannot be fused into a single AND (with the
;; select reading SCC directly), because s_quadmask_b64 implicitly defines SCC.
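;; The same illustrative fold as in the 32-bit case is blocked here: SCC from
;; s_and_b32 cannot feed the select because s_quadmask_b64 clobbers it first,
;; so the separate s_cmp_eq_u32 must remain.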
define amdgpu_kernel void @test_scc_quadmask_64(i32 %val0, i64 %val1, ptr addrspace(1) %ptr) {
; GFX11-GISEL-LABEL: test_scc_quadmask_64:
; GFX11-GISEL: ; %bb.0:
; GFX11-GISEL-NEXT: s_clause 0x1
; GFX11-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x2c
; GFX11-GISEL-NEXT: s_load_b32 s4, s[4:5], 0x24
; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-GISEL-NEXT: s_quadmask_b64 s[0:1], s[0:1]
; GFX11-GISEL-NEXT: s_and_b32 s4, s4, 1
; GFX11-GISEL-NEXT: v_mov_b32_e32 v0, s0
; GFX11-GISEL-NEXT: s_cmp_eq_u32 s4, 0
; GFX11-GISEL-NEXT: v_dual_mov_b32 v4, 0 :: v_dual_mov_b32 v1, s1
; GFX11-GISEL-NEXT: s_cselect_b32 s0, 1, 0
; GFX11-GISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v5, s0
; GFX11-GISEL-NEXT: v_mov_b32_e32 v3, 0
; GFX11-GISEL-NEXT: global_store_b64 v4, v[0:1], s[2:3]
; GFX11-GISEL-NEXT: global_store_b32 v[2:3], v5, off
; GFX11-GISEL-NEXT: s_endpgm
;
; GFX11-SDAG-LABEL: test_scc_quadmask_64:
; GFX11-SDAG: ; %bb.0:
; GFX11-SDAG-NEXT: s_clause 0x1
; GFX11-SDAG-NEXT: s_load_b32 s6, s[4:5], 0x24
; GFX11-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x2c
; GFX11-SDAG-NEXT: v_mov_b32_e32 v4, 0
; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-SDAG-NEXT: s_and_b32 s4, s6, 1
; GFX11-SDAG-NEXT: s_quadmask_b64 s[0:1], s[0:1]
; GFX11-SDAG-NEXT: s_cmp_eq_u32 s4, 0
; GFX11-SDAG-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v3, s1
; GFX11-SDAG-NEXT: v_mov_b32_e32 v2, s0
; GFX11-SDAG-NEXT: s_cselect_b32 s0, -1, 0
; GFX11-SDAG-NEXT: v_mov_b32_e32 v1, 0
; GFX11-SDAG-NEXT: v_cndmask_b32_e64 v5, 0, 1, s0
; GFX11-SDAG-NEXT: global_store_b64 v4, v[2:3], s[2:3]
; GFX11-SDAG-NEXT: global_store_b32 v[0:1], v5, off
; GFX11-SDAG-NEXT: s_endpgm
%and = and i32 %val0, 1
%result = call i64 @llvm.amdgcn.s.quadmask.i64(i64 %val1) nounwind readnone
store i64 %result, ptr addrspace(1) %ptr
%cmp = icmp eq i32 %and, 0
%sel = select i1 %cmp, i32 1, i32 0
store i32 %sel, ptr addrspace(1) null, align 4
ret void
}