; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
; RUN: llc -mtriple=amdgcn -mcpu=gfx900 < %s | FileCheck %s

declare i32 @llvm.abs.i32(i32, i1)
declare i32 @llvm.amdgcn.s.quadmask.i32(i32)
declare i64 @llvm.amdgcn.s.quadmask.i64(i64)
declare i32 @llvm.ctpop.i32(i32)
declare i64 @llvm.ctpop.i64(i64)

; SCC set by s_lshl_b32 is reused directly for the compare-with-zero (no separate s_cmp).
define amdgpu_ps i32 @shl32(i32 inreg %val0, i32 inreg %val1) {
; CHECK-LABEL: shl32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_lshl_b32 s0, s0, s1
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: ; return to shader part epilog
  %result = shl i32 %val0, %val1
  %cmp = icmp ne i32 %result, 0
  %zext = zext i1 %cmp to i32
  ret i32 %zext
}

; 64-bit variant: SCC from s_lshl_b64 feeds the select without a separate compare.
define amdgpu_ps i32 @shl64(i64 inreg %val0, i64 inreg %val1) {
; CHECK-LABEL: shl64:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_lshl_b64 s[0:1], s[0:1], s2
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: ; return to shader part epilog
  %result = shl i64 %val0, %val1
  %cmp = icmp ne i64 %result, 0
  %zext = zext i1 %cmp to i32
  ret i32 %zext
}

; SCC set by s_lshr_b32 is reused for the compare-with-zero.
define amdgpu_ps i32 @lshr32(i32 inreg %val0, i32 inreg %val1) {
; CHECK-LABEL: lshr32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_lshr_b32 s0, s0, s1
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: ; return to shader part epilog
  %result = lshr i32 %val0, %val1
  %cmp = icmp ne i32 %result, 0
  %zext = zext i1 %cmp to i32
  ret i32 %zext
}

; 64-bit variant: SCC from s_lshr_b64 feeds the select directly.
define amdgpu_ps i32 @lshr64(i64 inreg %val0, i64 inreg %val1) {
; CHECK-LABEL: lshr64:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_lshr_b64 s[0:1], s[0:1], s2
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: ; return to shader part epilog
  %result = lshr i64 %val0, %val1
  %cmp = icmp ne i64 %result, 0
  %zext = zext i1 %cmp to i32
  ret i32 %zext
}

; SCC set by s_ashr_i32 is reused for the compare-with-zero.
define amdgpu_ps i32 @ashr32(i32 inreg %val0, i32 inreg %val1) {
; CHECK-LABEL: ashr32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_ashr_i32 s0, s0, s1
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: ; return to shader part epilog
  %result = ashr i32 %val0, %val1
  %cmp = icmp ne i32 %result, 0
  %zext = zext i1 %cmp to i32
  ret i32 %zext
}

; 64-bit variant: SCC from s_ashr_i64 feeds the select directly.
define amdgpu_ps i32 @ashr64(i64 inreg %val0, i64 inreg %val1) {
; CHECK-LABEL: ashr64:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_ashr_i64 s[0:1], s[0:1], s2
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: ; return to shader part epilog
  %result = ashr i64 %val0, %val1
  %cmp = icmp ne i64 %result, 0
  %zext = zext i1 %cmp to i32
  ret i32 %zext
}

; abs pattern (sub/icmp/select) selects to s_abs_i32; the inline asm keeps the scalar
; result alive, and its SCC def is still reused for the compare-with-zero.
define amdgpu_ps i32 @abs32(i32 inreg %val0) {
; CHECK-LABEL: abs32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_abs_i32 s0, s0
; CHECK-NEXT: ;;#ASMSTART
; CHECK-NEXT: ; use s0
; CHECK-NEXT: ;;#ASMEND
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: ; return to shader part epilog
  %neg = sub i32 0, %val0
  %cond = icmp sgt i32 %val0, %neg
  %result = select i1 %cond, i32 %val0, i32 %neg
  call void asm "; use $0", "s"(i32 %result)
  %cmp = icmp ne i32 %result, 0
  %zext = zext i1 %cmp to i32
  ret i32 %zext
}

; sub + llvm.abs selects to s_absdiff_i32, whose SCC def is reused for the compare.
define amdgpu_ps i32 @absdiff32(i32 inreg %val0, i32 inreg %val1) {
; CHECK-LABEL: absdiff32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_absdiff_i32 s0, s0, s1
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: ; return to shader part epilog
  %diff = sub i32 %val0, %val1
  %result = call i32 @llvm.abs.i32(i32 %diff, i1 false)
  %cmp = icmp ne i32 %result, 0
  %zext = zext i1 %cmp to i32
  ret i32 %zext
}

; SCC set by s_and_b32 is reused for the compare-with-zero.
define amdgpu_ps i32 @and32(i32 inreg %val0, i32 inreg %val1) {
; CHECK-LABEL: and32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_and_b32 s0, s0, s1
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: ; return to shader part epilog
  %result = and i32 %val0, %val1
  %cmp = icmp ne i32 %result, 0
  %zext = zext i1 %cmp to i32
  ret i32 %zext
}

; 64-bit variant: SCC from s_and_b64 feeds the select directly.
define amdgpu_ps i32 @and64(i64 inreg %val0, i64 inreg %val1) {
; CHECK-LABEL: and64:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_and_b64 s[0:1], s[0:1], s[2:3]
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: ; return to shader part epilog
  %result = and i64 %val0, %val1
  %cmp = icmp ne i64 %result, 0
  %zext = zext i1 %cmp to i32
  ret i32 %zext
}

; SCC set by s_or_b32 is reused for the compare-with-zero.
define amdgpu_ps i32 @or32(i32 inreg %val0, i32 inreg %val1) {
; CHECK-LABEL: or32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_or_b32 s0, s0, s1
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: ; return to shader part epilog
  %result = or i32 %val0, %val1
  %cmp = icmp ne i32 %result, 0
  %zext = zext i1 %cmp to i32
  ret i32 %zext
}

; 64-bit variant: SCC from s_or_b64 feeds the select directly.
define amdgpu_ps i32 @or64(i64 inreg %val0, i64 inreg %val1) {
; CHECK-LABEL: or64:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_or_b64 s[0:1], s[0:1], s[2:3]
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: ; return to shader part epilog
  %result = or i64 %val0, %val1
  %cmp = icmp ne i64 %result, 0
  %zext = zext i1 %cmp to i32
  ret i32 %zext
}

; SCC set by s_xor_b32 is reused for the compare-with-zero.
define amdgpu_ps i32 @xor32(i32 inreg %val0, i32 inreg %val1) {
; CHECK-LABEL: xor32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_xor_b32 s0, s0, s1
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: ; return to shader part epilog
  %result = xor i32 %val0, %val1
  %cmp = icmp ne i32 %result, 0
  %zext = zext i1 %cmp to i32
  ret i32 %zext
}

; 64-bit variant: SCC from s_xor_b64 feeds the select directly.
define amdgpu_ps i32 @xor64(i64 inreg %val0, i64 inreg %val1) {
; CHECK-LABEL: xor64:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_xor_b64 s[0:1], s[0:1], s[2:3]
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: ; return to shader part epilog
  %result = xor i64 %val0, %val1
  %cmp = icmp ne i64 %result, 0
  %zext = zext i1 %cmp to i32
  ret i32 %zext
}

; and + not folds to s_nand_b32; inline asm keeps the result alive and SCC is reused.
define amdgpu_ps i32 @nand32(i32 inreg %val0, i32 inreg %val1) {
; CHECK-LABEL: nand32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_nand_b32 s0, s0, s1
; CHECK-NEXT: ;;#ASMSTART
; CHECK-NEXT: ; use s0
; CHECK-NEXT: ;;#ASMEND
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: ; return to shader part epilog
  %result = and i32 %val0, %val1
  %result2 = xor i32 %result, -1
  call void asm "; use $0", "s"(i32 %result2)
  %cmp = icmp ne i32 %result2, 0
  %zext = zext i1 %cmp to i32
  ret i32 %zext
}

; 64-bit variant: and + not folds to s_nand_b64; SCC is reused after the asm use.
define amdgpu_ps i32 @nand64(i64 inreg %val0, i64 inreg %val1) {
; CHECK-LABEL: nand64:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_nand_b64 s[0:1], s[0:1], s[2:3]
; CHECK-NEXT: ;;#ASMSTART
; CHECK-NEXT: ; use s[0:1]
; CHECK-NEXT: ;;#ASMEND
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: ; return to shader part epilog
  %result = and i64 %val0, %val1
  %result2 = xor i64 %result, -1
  call void asm "; use $0", "s"(i64 %result2)
  %cmp = icmp ne i64 %result2, 0
  %zext = zext i1 %cmp to i32
  ret i32 %zext
}

; or + not folds to s_nor_b32; inline asm keeps the result alive and SCC is reused.
define amdgpu_ps i32 @nor32(i32 inreg %val0, i32 inreg %val1) {
; CHECK-LABEL: nor32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_nor_b32 s0, s0, s1
; CHECK-NEXT: ;;#ASMSTART
; CHECK-NEXT: ; use s0
; CHECK-NEXT: ;;#ASMEND
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: ; return to shader part epilog
  %result = or i32 %val0, %val1
  %result2 = xor i32 %result, -1
  call void asm "; use $0", "s"(i32 %result2)
  %cmp = icmp ne i32 %result2, 0
  %zext = zext i1 %cmp to i32
  ret i32 %zext
}

; 64-bit variant: or + not folds to s_nor_b64; SCC is reused after the asm use.
define amdgpu_ps i32 @nor64(i64 inreg %val0, i64 inreg %val1) {
; CHECK-LABEL: nor64:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_nor_b64 s[0:1], s[0:1], s[2:3]
; CHECK-NEXT: ;;#ASMSTART
; CHECK-NEXT: ; use s[0:1]
; CHECK-NEXT: ;;#ASMEND
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: ; return to shader part epilog
  %result = or i64 %val0, %val1
  %result2 = xor i64 %result, -1
  call void asm "; use $0", "s"(i64 %result2)
  %cmp = icmp ne i64 %result2, 0
  %zext = zext i1 %cmp to i32
  ret i32 %zext
}

; xor + not folds to s_xnor_b32; inline asm keeps the result alive and SCC is reused.
define amdgpu_ps i32 @xnor32(i32 inreg %val0, i32 inreg %val1) {
; CHECK-LABEL: xnor32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_xnor_b32 s0, s0, s1
; CHECK-NEXT: ;;#ASMSTART
; CHECK-NEXT: ; use s0
; CHECK-NEXT: ;;#ASMEND
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: ; return to shader part epilog
  %result = xor i32 %val0, %val1
  %result2 = xor i32 %result, -1
  call void asm "; use $0", "s"(i32 %result2)
  %cmp = icmp ne i32 %result2, 0
  %zext = zext i1 %cmp to i32
  ret i32 %zext
}

; 64-bit variant: xor + not folds to s_xnor_b64; SCC is reused after the asm use.
define amdgpu_ps i32 @xnor64(i64 inreg %val0, i64 inreg %val1) {
; CHECK-LABEL: xnor64:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_xnor_b64 s[0:1], s[0:1], s[2:3]
; CHECK-NEXT: ;;#ASMSTART
; CHECK-NEXT: ; use s[0:1]
; CHECK-NEXT: ;;#ASMEND
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: ; return to shader part epilog
  %result = xor i64 %val0, %val1
  %result2 = xor i64 %result, -1
  call void asm "; use $0", "s"(i64 %result2)
  %cmp = icmp ne i64 %result2, 0
  %zext = zext i1 %cmp to i32
  ret i32 %zext
}

; and-with-inverted-operand folds to s_andn2_b32; its SCC def is reused for the compare.
define amdgpu_ps i32 @andn232(i32 inreg %val0, i32 inreg %val1) {
; CHECK-LABEL: andn232:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_andn2_b32 s0, s0, s1
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: ; return to shader part epilog
  %nval1 = xor i32 %val1, -1
  %result = and i32 %val0, %nval1
  %cmp = icmp ne i32 %result, 0
  %zext = zext i1 %cmp to i32
  ret i32 %zext
}

; 64-bit variant: folds to s_andn2_b64 with SCC reuse.
; NOTE(review): the name "nandn264" looks like a typo for "andn264" (it tests
; s_andn2_b64, the 64-bit counterpart of andn232) - confirm before renaming.
define amdgpu_ps i32 @nandn264(i64 inreg %val0, i64 inreg %val1) {
; CHECK-LABEL: nandn264:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: ; return to shader part epilog
  %nval1 = xor i64 %val1, -1
  %result = and i64 %val0, %nval1
  %cmp = icmp ne i64 %result, 0
  %zext = zext i1 %cmp to i32
  ret i32 %zext
}

; or-with-inverted-operand folds to s_orn2_b32; its SCC def is reused for the compare.
define amdgpu_ps i32 @orn232(i32 inreg %val0, i32 inreg %val1) {
; CHECK-LABEL: orn232:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_orn2_b32 s0, s0, s1
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: ; return to shader part epilog
  %nval1 = xor i32 %val1, -1
  %result = or i32 %val0, %nval1
  %cmp = icmp ne i32 %result, 0
  %zext = zext i1 %cmp to i32
  ret i32 %zext
}

; 64-bit variant: folds to s_orn2_b64 with SCC reuse.
define amdgpu_ps i32 @orn264(i64 inreg %val0, i64 inreg %val1) {
; CHECK-LABEL: orn264:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_orn2_b64 s[0:1], s[0:1], s[2:3]
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: ; return to shader part epilog
  %nval1 = xor i64 %val1, -1
  %result = or i64 %val0, %nval1
  %cmp = icmp ne i64 %result, 0
  %zext = zext i1 %cmp to i32
  ret i32 %zext
}

; shl+ashr sign-extract selects to s_bfe_i32; its SCC def is reused for the compare.
define amdgpu_ps i32 @bfe_i32(i32 inreg %val0) {
; CHECK-LABEL: bfe_i32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_bfe_i32 s0, s0, 0x80010
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: ; return to shader part epilog
  %shl = shl i32 %val0, 8
  %result = ashr i32 %shl, 24
  %cmp = icmp ne i32 %result, 0
  %zext = zext i1 %cmp to i32
  ret i32 %zext
}

; 64-bit sign-extract: here a separate s_cmp_lg_u64 is still emitted (the compare is
; done on a masked copy rather than reusing SCC from s_bfe_i64).
define amdgpu_ps i32 @bfe_i64(i64 inreg %val0) {
; CHECK-LABEL: bfe_i64:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_bfe_i64 s[2:3], s[0:1], 0x80000
; CHECK-NEXT: s_and_b32 s0, s0, 0xff
; CHECK-NEXT: s_mov_b32 s1, 0
; CHECK-NEXT: s_cmp_lg_u64 s[0:1], 0
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: ;;#ASMSTART
; CHECK-NEXT: ; use s[2:3]
; CHECK-NEXT: ;;#ASMEND
; CHECK-NEXT: ; return to shader part epilog
  %shl = shl i64 %val0, 56
  %result = ashr i64 %shl, 56
  call void asm "; use $0", "s"(i64 %result)
  %cmp = icmp ne i64 %result, 0
  %zext = zext i1 %cmp to i32
  ret i32 %zext
}

; shl+lshr zero-extract selects to s_bfe_u32; its SCC def is reused for the compare.
define amdgpu_ps i32 @bfe_u32(i32 inreg %val0) {
; CHECK-LABEL: bfe_u32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_bfe_u32 s0, s0, 0x80010
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: ; return to shader part epilog
  %shl = shl i32 %val0, 8
  %result = lshr i32 %shl, 24
  %cmp = icmp ne i32 %result, 0
  %zext = zext i1 %cmp to i32
  ret i32 %zext
}

; 64-bit zero-extract lowers to an and/mask pair; a separate s_cmp_lg_u64 is emitted
; before the asm use and its SCC is consumed by s_cselect afterwards.
define amdgpu_ps i32 @bfe_u64(i64 inreg %val0) {
; CHECK-LABEL: bfe_u64:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_and_b32 s0, s0, 0xff
; CHECK-NEXT: s_mov_b32 s1, 0
; CHECK-NEXT: s_cmp_lg_u64 s[0:1], 0
; CHECK-NEXT: ;;#ASMSTART
; CHECK-NEXT: ; use s[0:1]
; CHECK-NEXT: ;;#ASMEND
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: ; return to shader part epilog
  %shl = shl i64 %val0, 56
  %result = lshr i64 %shl, 56
  call void asm "; use $0", "s"(i64 %result)
  %cmp = icmp ne i64 %result, 0
  %zext = zext i1 %cmp to i32
  ret i32 %zext
}

; 32 - ctpop(x): the sub's SCC is set by an explicit s_cmp_lg_u32 and survives the asm use.
define amdgpu_ps i32 @bcnt032(i32 inreg %val0) {
; CHECK-LABEL: bcnt032:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_bcnt1_i32_b32 s0, s0
; CHECK-NEXT: s_sub_i32 s0, 32, s0
; CHECK-NEXT: s_cmp_lg_u32 s0, 0
; CHECK-NEXT: ;;#ASMSTART
; CHECK-NEXT: ; use s0
; CHECK-NEXT: ;;#ASMEND
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: ; return to shader part epilog
  %result = call i32 @llvm.ctpop.i32(i32 %val0) nounwind readnone
  %result2 = sub i32 32, %result
  call void asm "; use $0", "s"(i32 %result2)
  %cmp = icmp ne i32 %result2, 0
  %zext = zext i1 %cmp to i32
  ret i32 %zext
}

; 64 - ctpop(x): the 64-bit sub/subb pair is followed by an explicit s_cmp_lg_u64.
define amdgpu_ps i32 @bcnt064(i64 inreg %val0) {
; CHECK-LABEL: bcnt064:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
; CHECK-NEXT: s_sub_u32 s0, 64, s0
; CHECK-NEXT: s_subb_u32 s1, 0, 0
; CHECK-NEXT: s_cmp_lg_u64 s[0:1], 0
; CHECK-NEXT: ;;#ASMSTART
; CHECK-NEXT: ; use s[0:1]
; CHECK-NEXT: ;;#ASMEND
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: ; return to shader part epilog
  %result = call i64 @llvm.ctpop.i64(i64 %val0) nounwind readnone
  %result2 = sub i64 64, %result
  call void asm "; use $0", "s"(i64 %result2)
  %cmp = icmp ne i64 %result2, 0
  %zext = zext i1 %cmp to i32
  ret i32 %zext
}

; ctpop selects to s_bcnt1_i32_b32; its SCC def survives the asm use and is reused.
define amdgpu_ps i32 @bcnt132(i32 inreg %val0) {
; CHECK-LABEL: bcnt132:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_bcnt1_i32_b32 s0, s0
; CHECK-NEXT: ;;#ASMSTART
; CHECK-NEXT: ; use s0
; CHECK-NEXT: ;;#ASMEND
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: ; return to shader part epilog
  %result = call i32 @llvm.ctpop.i32(i32 %val0) nounwind readnone
  call void asm "; use $0", "s"(i32 %result)
  %cmp = icmp ne i32 %result, 0
  %zext = zext i1 %cmp to i32
  ret i32 %zext
}

; 64-bit ctpop produces a 32-bit count that is widened; a separate s_cmp_lg_u64 is used.
define amdgpu_ps i32 @bcnt164(i64 inreg %val0) {
; CHECK-LABEL: bcnt164:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
; CHECK-NEXT: s_mov_b32 s1, 0
; CHECK-NEXT: s_cmp_lg_u64 s[0:1], 0
; CHECK-NEXT: ;;#ASMSTART
; CHECK-NEXT: ; use s[0:1]
; CHECK-NEXT: ;;#ASMEND
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: ; return to shader part epilog
  %result = call i64 @llvm.ctpop.i64(i64 %val0) nounwind readnone
  call void asm "; use $0", "s"(i64 %result)
  %cmp = icmp ne i64 %result, 0
  %zext = zext i1 %cmp to i32
  ret i32 %zext
}

; s.quadmask intrinsic selects to s_quadmask_b32; its SCC def survives the asm use.
define amdgpu_ps i32 @quadmask32(i32 inreg %val0) {
; CHECK-LABEL: quadmask32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_quadmask_b32 s0, s0
; CHECK-NEXT: ;;#ASMSTART
; CHECK-NEXT: ; use s0
; CHECK-NEXT: ;;#ASMEND
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: ; return to shader part epilog
  %result = call i32 @llvm.amdgcn.s.quadmask.i32(i32 %val0) nounwind readnone
  call void asm "; use $0", "s"(i32 %result)
  %cmp = icmp ne i32 %result, 0
  %zext = zext i1 %cmp to i32
  ret i32 %zext
}

; 64-bit variant: s_quadmask_b64's SCC def survives the asm use and is reused.
define amdgpu_ps i32 @quadmask64(i64 inreg %val0) {
; CHECK-LABEL: quadmask64:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_quadmask_b64 s[0:1], s[0:1]
; CHECK-NEXT: ;;#ASMSTART
; CHECK-NEXT: ; use s[0:1]
; CHECK-NEXT: ;;#ASMEND
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: ; return to shader part epilog
  %result = call i64 @llvm.amdgcn.s.quadmask.i64(i64 %val0) nounwind readnone
  call void asm "; use $0", "s"(i64 %result)
  %cmp = icmp ne i64 %result, 0
  %zext = zext i1 %cmp to i32
  ret i32 %zext
}

; xor with -1 selects to s_not_b32; its SCC def survives the asm use and is reused.
define amdgpu_ps i32 @not32(i32 inreg %val0) {
; CHECK-LABEL: not32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_not_b32 s0, s0
; CHECK-NEXT: ;;#ASMSTART
; CHECK-NEXT: ; use s0
; CHECK-NEXT: ;;#ASMEND
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: ; return to shader part epilog
  %result = xor i32 %val0, -1
  call void asm "; use $0", "s"(i32 %result)
  %cmp = icmp ne i32 %result, 0
  %zext = zext i1 %cmp to i32
  ret i32 %zext
}

; 64-bit variant: s_not_b64's SCC def survives the asm use and is reused.
define amdgpu_ps i32 @not64(i64 inreg %val0) {
; CHECK-LABEL: not64:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_not_b64 s[0:1], s[0:1]
; CHECK-NEXT: ;;#ASMSTART
; CHECK-NEXT: ; use s[0:1]
; CHECK-NEXT: ;;#ASMEND
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: ; return to shader part epilog
  %result = xor i64 %val0, -1
  call void asm "; use $0", "s"(i64 %result)
  %cmp = icmp ne i64 %result, 0
  %zext = zext i1 %cmp to i32
  ret i32 %zext
}

| |
| ; -------------------------------------------------------------------------------- |
| ; Negative tests |
| ; -------------------------------------------------------------------------------- |
| |
| @1 = extern_weak dso_local addrspace(4) constant i32 |
| |
; Negative test: the address of extern_weak @1 is not a compile-time-known nonzero
; value, so the s_cmp_lg_u64 against zero must be kept (not folded away).
define amdgpu_ps i32 @si_pc_add_rel_offset_must_not_optimize() {
; CHECK-LABEL: si_pc_add_rel_offset_must_not_optimize:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_getpc_b64 s[0:1]
; CHECK-NEXT: s_add_u32 s0, s0, __unnamed_1@rel32@lo+4
; CHECK-NEXT: s_addc_u32 s1, s1, __unnamed_1@rel32@hi+12
; CHECK-NEXT: s_cmp_lg_u64 s[0:1], 0
; CHECK-NEXT: s_cbranch_scc0 .LBB36_2
; CHECK-NEXT: ; %bb.1: ; %endif
; CHECK-NEXT: s_mov_b32 s0, 1
; CHECK-NEXT: s_branch .LBB36_3
; CHECK-NEXT: .LBB36_2: ; %if
; CHECK-NEXT: s_mov_b32 s0, 0
; CHECK-NEXT: s_branch .LBB36_3
; CHECK-NEXT: .LBB36_3:
  %cmp = icmp ne ptr addrspace(4) @1, null
  br i1 %cmp, label %endif, label %if

if:
  ret i32 0

endif:
  ret i32 1
}