; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
;; Test that the carry-out from a 64-bit add/sub (synthesized from two 32-bit
;; adds/subs) is utilized (i.e. that no additional compare is generated).
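;;
;; A fully carry-chained lowering of @v_add64_32 below would look roughly like
;; this (an illustrative sketch of the intended pattern, not a checked
;; sequence):
;;   v_add_co_u32  v0, vcc, v0, v2
;;   v_addc_co_u32 v1, vcc, v1, v3, vcc
;;   v_addc_co_u32 v2, vcc, 0, v4, vcc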

; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 < %s | FileCheck %s

%struct.uint96 = type { i64, i32 }
%struct.uint64pair = type { i64, i64 }

declare {i64, i1} @llvm.uadd.with.overflow.i64(i64, i64)
declare {i64, i1} @llvm.usub.with.overflow.i64(i64, i64)

declare {<2 x i64>, <2 x i1>} @llvm.uadd.with.overflow.v2i64(<2 x i64>, <2 x i64>)
declare {<2 x i64>, <2 x i1>} @llvm.usub.with.overflow.v2i64(<2 x i64>, <2 x i64>)

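; The carry-out of the 64-bit add feeds the high 32-bit add of the 96-bit sum.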
define %struct.uint96 @v_add64_32(i64 %val64A, i64 %val64B, i32 %val32) {
; CHECK-LABEL: v_add64_32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_add_co_u32_e32 v5, vcc, v0, v2
; CHECK-NEXT: v_addc_co_u32_e32 v6, vcc, v1, v3, vcc
; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, v[5:6], v[0:1]
; CHECK-NEXT: v_mov_b32_e32 v0, v5
; CHECK-NEXT: v_addc_co_u32_e32 v2, vcc, 0, v4, vcc
; CHECK-NEXT: v_mov_b32_e32 v1, v6
; CHECK-NEXT: s_setpc_b64 s[30:31]
  %sum64 = add i64 %val64A, %val64B
  %obit = icmp ult i64 %sum64, %val64A
  %obit32 = zext i1 %obit to i32
  %sum32 = add i32 %val32, %obit32
  %.fca.0.insert = insertvalue %struct.uint96 poison, i64 %sum64, 0
  %.fca.1.insert = insertvalue %struct.uint96 %.fca.0.insert, i32 %sum32, 1
  ret %struct.uint96 %.fca.1.insert
}

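; Lane-wise uadd.with.overflow: each i64 lane yields a carry bit that is
; sign-extended to a full-width mask.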
define <2 x i64> @v_uadd_v2i64(<2 x i64> %val0, <2 x i64> %val1, ptr %ptrval) {
; CHECK-LABEL: v_uadd_v2i64:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_add_co_u32_e32 v6, vcc, v2, v6
; CHECK-NEXT: v_addc_co_u32_e32 v7, vcc, v3, v7, vcc
; CHECK-NEXT: v_add_co_u32_e32 v4, vcc, v0, v4
; CHECK-NEXT: v_addc_co_u32_e32 v5, vcc, v1, v5, vcc
; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, v[4:5], v[0:1]
; CHECK-NEXT: flat_store_dwordx4 v[8:9], v[4:7]
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, v[6:7], v[2:3]
; CHECK-NEXT: v_mov_b32_e32 v1, v0
; CHECK-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
; CHECK-NEXT: v_mov_b32_e32 v3, v2
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: s_setpc_b64 s[30:31]
  %pair = call {<2 x i64>, <2 x i1>} @llvm.uadd.with.overflow.v2i64(<2 x i64> %val0, <2 x i64> %val1)
  %val = extractvalue {<2 x i64>, <2 x i1>} %pair, 0
  %obit = extractvalue {<2 x i64>, <2 x i1>} %pair, 1
  %res = sext <2 x i1> %obit to <2 x i64>
  store <2 x i64> %val, ptr %ptrval
  ret <2 x i64> %res
}

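; Lane-wise usub.with.overflow: the per-lane borrow is recovered as
; (result > input).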
define <2 x i64> @v_usub_v2i64(<2 x i64> %val0, <2 x i64> %val1, ptr %ptrval) {
; CHECK-LABEL: v_usub_v2i64:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_sub_co_u32_e32 v6, vcc, v2, v6
; CHECK-NEXT: v_subb_co_u32_e32 v7, vcc, v3, v7, vcc
; CHECK-NEXT: v_sub_co_u32_e32 v4, vcc, v0, v4
; CHECK-NEXT: v_subb_co_u32_e32 v5, vcc, v1, v5, vcc
; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, v[4:5], v[0:1]
; CHECK-NEXT: flat_store_dwordx4 v[8:9], v[4:7]
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, v[6:7], v[2:3]
; CHECK-NEXT: v_mov_b32_e32 v1, v0
; CHECK-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
; CHECK-NEXT: v_mov_b32_e32 v3, v2
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: s_setpc_b64 s[30:31]
  %pair = call {<2 x i64>, <2 x i1>} @llvm.usub.with.overflow.v2i64(<2 x i64> %val0, <2 x i64> %val1)
  %val = extractvalue {<2 x i64>, <2 x i1>} %pair, 0
  %obit = extractvalue {<2 x i64>, <2 x i1>} %pair, 1
  %res = sext <2 x i1> %obit to <2 x i64>
  store <2 x i64> %val, ptr %ptrval
  ret <2 x i64> %res
}

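; Plain i64 uadd.with.overflow: the carry is recomputed as (result < input).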
define i64 @v_uadd_i64(i64 %val0, i64 %val1, ptr %ptrval) {
; CHECK-LABEL: v_uadd_i64:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_add_co_u32_e32 v2, vcc, v0, v2
; CHECK-NEXT: v_addc_co_u32_e32 v3, vcc, v1, v3, vcc
; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, v[2:3], v[0:1]
; CHECK-NEXT: flat_store_dwordx2 v[4:5], v[2:3]
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
; CHECK-NEXT: v_mov_b32_e32 v1, v0
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: s_setpc_b64 s[30:31]
  %pair = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %val0, i64 %val1)
  %val = extractvalue {i64, i1} %pair, 0
  %obit = extractvalue {i64, i1} %pair, 1
  %res = sext i1 %obit to i64
  store i64 %val, ptr %ptrval
  ret i64 %res
}

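; Adding +1 carries out only when the input is -1, so the overflow check folds
; to (result == 0).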
define i64 @v_uadd_p1(i64 %val0, i64 %val1, ptr %ptrval) {
; CHECK-LABEL: v_uadd_p1:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_add_co_u32_e32 v0, vcc, 1, v0
; CHECK-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
; CHECK-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
; CHECK-NEXT: flat_store_dwordx2 v[4:5], v[0:1]
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
; CHECK-NEXT: v_mov_b32_e32 v1, v0
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: s_setpc_b64 s[30:31]
  %pair = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %val0, i64 1)
  %val = extractvalue {i64, i1} %pair, 0
  %obit = extractvalue {i64, i1} %pair, 1
  %res = sext i1 %obit to i64
  store i64 %val, ptr %ptrval
  ret i64 %res
}

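; Adding -1 carries out for every nonzero input, so the overflow check folds
; to (input != 0).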
define i64 @v_uadd_n1(i64 %val0, i64 %val1, ptr %ptrval) {
; CHECK-LABEL: v_uadd_n1:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_add_co_u32_e32 v2, vcc, -1, v0
; CHECK-NEXT: v_addc_co_u32_e32 v3, vcc, -1, v1, vcc
; CHECK-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
; CHECK-NEXT: flat_store_dwordx2 v[4:5], v[2:3]
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
; CHECK-NEXT: v_mov_b32_e32 v1, v0
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: s_setpc_b64 s[30:31]
  %pair = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %val0, i64 -1)
  %val = extractvalue {i64, i1} %pair, 0
  %obit = extractvalue {i64, i1} %pair, 1
  %res = sext i1 %obit to i64
  store i64 %val, ptr %ptrval
  ret i64 %res
}

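; Subtracting +1 is lowered as adding -1; the borrow is recovered as
; (result > input).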
define i64 @v_usub_p1(i64 %val0, i64 %val1, ptr %ptrval) {
; CHECK-LABEL: v_usub_p1:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_add_co_u32_e32 v2, vcc, -1, v0
; CHECK-NEXT: v_addc_co_u32_e32 v3, vcc, -1, v1, vcc
; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
; CHECK-NEXT: flat_store_dwordx2 v[4:5], v[2:3]
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
; CHECK-NEXT: v_mov_b32_e32 v1, v0
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: s_setpc_b64 s[30:31]
  %pair = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %val0, i64 1)
  %val = extractvalue {i64, i1} %pair, 0
  %obit = extractvalue {i64, i1} %pair, 1
  %res = sext i1 %obit to i64
  store i64 %val, ptr %ptrval
  ret i64 %res
}

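; Subtracting -1 is lowered as adding +1; the borrow is again recovered as
; (result > input).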
define i64 @v_usub_n1(i64 %val0, i64 %val1, ptr %ptrval) {
; CHECK-LABEL: v_usub_n1:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_add_co_u32_e32 v2, vcc, 1, v0
; CHECK-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v1, vcc
; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
; CHECK-NEXT: flat_store_dwordx2 v[4:5], v[2:3]
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
; CHECK-NEXT: v_mov_b32_e32 v1, v0
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: s_setpc_b64 s[30:31]
  %pair = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %val0, i64 -1)
  %val = extractvalue {i64, i1} %pair, 0
  %obit = extractvalue {i64, i1} %pair, 1
  %res = sext i1 %obit to i64
  store i64 %val, ptr %ptrval
  ret i64 %res
}

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Test SGPR (inreg) operands
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
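; With inreg operands the 64-bit adds/subs lower to scalar s_add_u32/s_addc_u32
; (or s_sub_u32/s_subb_u32) on SCC; the overflow mask is still materialized on
; the VALU and copied back to SGPRs via v_readfirstlane_b32.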

define amdgpu_ps %struct.uint96 @s_add64_32(i64 inreg %val64A, i64 inreg %val64B, i32 inreg %val32) {
; CHECK-LABEL: s_add64_32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_add_u32 s6, s0, s2
; CHECK-NEXT: v_mov_b32_e32 v0, s0
; CHECK-NEXT: s_addc_u32 s7, s1, s3
; CHECK-NEXT: v_mov_b32_e32 v1, s1
; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[0:1]
; CHECK-NEXT: s_mov_b32 s0, s6
; CHECK-NEXT: s_cmp_lg_u64 vcc, 0
; CHECK-NEXT: s_addc_u32 s2, s4, 0
; CHECK-NEXT: s_mov_b32 s1, s7
; CHECK-NEXT: ; return to shader part epilog
  %sum64 = add i64 %val64A, %val64B
  %obit = icmp ult i64 %sum64, %val64A
  %obit32 = zext i1 %obit to i32
  %sum32 = add i32 %val32, %obit32
  %.fca.0.insert = insertvalue %struct.uint96 poison, i64 %sum64, 0
  %.fca.1.insert = insertvalue %struct.uint96 %.fca.0.insert, i32 %sum32, 1
  ret %struct.uint96 %.fca.1.insert
}

define amdgpu_ps <2 x i64> @s_uadd_v2i64(<2 x i64> inreg %val0, <2 x i64> inreg %val1, ptr %ptrval) {
; CHECK-LABEL: s_uadd_v2i64:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_add_u32 s6, s2, s6
; CHECK-NEXT: v_mov_b32_e32 v9, s3
; CHECK-NEXT: s_addc_u32 s7, s3, s7
; CHECK-NEXT: v_mov_b32_e32 v8, s2
; CHECK-NEXT: s_add_u32 s4, s0, s4
; CHECK-NEXT: v_mov_b32_e32 v7, s1
; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[8:9]
; CHECK-NEXT: s_addc_u32 s5, s1, s5
; CHECK-NEXT: v_mov_b32_e32 v6, s0
; CHECK-NEXT: v_cndmask_b32_e64 v8, 0, -1, vcc
; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[6:7]
; CHECK-NEXT: v_readfirstlane_b32 s2, v8
; CHECK-NEXT: v_cndmask_b32_e64 v6, 0, -1, vcc
; CHECK-NEXT: v_readfirstlane_b32 s0, v6
; CHECK-NEXT: v_mov_b32_e32 v2, s4
; CHECK-NEXT: v_mov_b32_e32 v3, s5
; CHECK-NEXT: v_mov_b32_e32 v4, s6
; CHECK-NEXT: v_mov_b32_e32 v5, s7
; CHECK-NEXT: s_mov_b32 s1, s0
; CHECK-NEXT: s_mov_b32 s3, s2
; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[2:5]
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: ; return to shader part epilog
  %pair = call {<2 x i64>, <2 x i1>} @llvm.uadd.with.overflow.v2i64(<2 x i64> %val0, <2 x i64> %val1)
  %val = extractvalue {<2 x i64>, <2 x i1>} %pair, 0
  %obit = extractvalue {<2 x i64>, <2 x i1>} %pair, 1
  %res = sext <2 x i1> %obit to <2 x i64>
  store <2 x i64> %val, ptr %ptrval
  ret <2 x i64> %res
}

define amdgpu_ps <2 x i64> @s_usub_v2i64(<2 x i64> inreg %val0, <2 x i64> inreg %val1, ptr %ptrval) {
; CHECK-LABEL: s_usub_v2i64:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_sub_u32 s6, s2, s6
; CHECK-NEXT: v_mov_b32_e32 v9, s3
; CHECK-NEXT: s_subb_u32 s7, s3, s7
; CHECK-NEXT: v_mov_b32_e32 v8, s2
; CHECK-NEXT: s_sub_u32 s4, s0, s4
; CHECK-NEXT: v_mov_b32_e32 v7, s1
; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, s[6:7], v[8:9]
; CHECK-NEXT: s_subb_u32 s5, s1, s5
; CHECK-NEXT: v_mov_b32_e32 v6, s0
; CHECK-NEXT: v_cndmask_b32_e64 v8, 0, -1, vcc
; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, s[4:5], v[6:7]
; CHECK-NEXT: v_readfirstlane_b32 s2, v8
; CHECK-NEXT: v_cndmask_b32_e64 v6, 0, -1, vcc
; CHECK-NEXT: v_readfirstlane_b32 s0, v6
; CHECK-NEXT: v_mov_b32_e32 v2, s4
; CHECK-NEXT: v_mov_b32_e32 v3, s5
; CHECK-NEXT: v_mov_b32_e32 v4, s6
; CHECK-NEXT: v_mov_b32_e32 v5, s7
; CHECK-NEXT: s_mov_b32 s1, s0
; CHECK-NEXT: s_mov_b32 s3, s2
; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[2:5]
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: ; return to shader part epilog
  %pair = call {<2 x i64>, <2 x i1>} @llvm.usub.with.overflow.v2i64(<2 x i64> %val0, <2 x i64> %val1)
  %val = extractvalue {<2 x i64>, <2 x i1>} %pair, 0
  %obit = extractvalue {<2 x i64>, <2 x i1>} %pair, 1
  %res = sext <2 x i1> %obit to <2 x i64>
  store <2 x i64> %val, ptr %ptrval
  ret <2 x i64> %res
}

define amdgpu_ps i64 @s_uadd_i64(i64 inreg %val0, i64 inreg %val1, ptr %ptrval) {
; CHECK-LABEL: s_uadd_i64:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_add_u32 s2, s0, s2
; CHECK-NEXT: v_mov_b32_e32 v3, s1
; CHECK-NEXT: s_addc_u32 s3, s1, s3
; CHECK-NEXT: v_mov_b32_e32 v2, s0
; CHECK-NEXT: v_mov_b32_e32 v5, s3
; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
; CHECK-NEXT: v_mov_b32_e32 v4, s2
; CHECK-NEXT: flat_store_dwordx2 v[0:1], v[4:5]
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: s_mov_b32 s1, s0
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: ; return to shader part epilog
  %pair = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %val0, i64 %val1)
  %val = extractvalue {i64, i1} %pair, 0
  %obit = extractvalue {i64, i1} %pair, 1
  %res = sext i1 %obit to i64
  store i64 %val, ptr %ptrval
  ret i64 %res
}

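; Scalar +1 special case: the carry folds to s_cmp_eq_u64 of the result
; against 0 on SCC.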
define amdgpu_ps i64 @s_uadd_p1(i64 inreg %val0, i64 inreg %val1, ptr %ptrval) {
; CHECK-LABEL: s_uadd_p1:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_add_u32 s0, s0, 1
; CHECK-NEXT: s_addc_u32 s1, s1, 0
; CHECK-NEXT: s_cmp_eq_u64 s[0:1], 0
; CHECK-NEXT: v_mov_b32_e32 v3, s1
; CHECK-NEXT: v_mov_b32_e32 v2, s0
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: s_mov_b32 s1, s0
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: ; return to shader part epilog
  %pair = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %val0, i64 1)
  %val = extractvalue {i64, i1} %pair, 0
  %obit = extractvalue {i64, i1} %pair, 1
  %res = sext i1 %obit to i64
  store i64 %val, ptr %ptrval
  ret i64 %res
}

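; Scalar -1 special case: the carry folds to s_cmp_lg_u64 of the input
; against 0 on SCC.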
define amdgpu_ps i64 @s_uadd_n1(i64 inreg %val0, i64 inreg %val1, ptr %ptrval) {
; CHECK-LABEL: s_uadd_n1:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_add_u32 s2, s0, -1
; CHECK-NEXT: s_addc_u32 s3, s1, -1
; CHECK-NEXT: s_cmp_lg_u64 s[0:1], 0
; CHECK-NEXT: v_mov_b32_e32 v2, s2
; CHECK-NEXT: v_mov_b32_e32 v3, s3
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: s_mov_b32 s1, s0
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: ; return to shader part epilog
  %pair = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %val0, i64 -1)
  %val = extractvalue {i64, i1} %pair, 0
  %obit = extractvalue {i64, i1} %pair, 1
  %res = sext i1 %obit to i64
  store i64 %val, ptr %ptrval
  ret i64 %res
}

define amdgpu_ps i64 @s_usub_p1(i64 inreg %val0, i64 inreg %val1, ptr %ptrval) {
; CHECK-LABEL: s_usub_p1:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_add_u32 s2, s0, -1
; CHECK-NEXT: v_mov_b32_e32 v3, s1
; CHECK-NEXT: s_addc_u32 s3, s1, -1
; CHECK-NEXT: v_mov_b32_e32 v2, s0
; CHECK-NEXT: v_mov_b32_e32 v5, s3
; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, s[2:3], v[2:3]
; CHECK-NEXT: v_mov_b32_e32 v4, s2
; CHECK-NEXT: flat_store_dwordx2 v[0:1], v[4:5]
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: s_mov_b32 s1, s0
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: ; return to shader part epilog
  %pair = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %val0, i64 1)
  %val = extractvalue {i64, i1} %pair, 0
  %obit = extractvalue {i64, i1} %pair, 1
  %res = sext i1 %obit to i64
  store i64 %val, ptr %ptrval
  ret i64 %res
}

define amdgpu_ps i64 @s_usub_n1(i64 inreg %val0, i64 inreg %val1, ptr %ptrval) {
; CHECK-LABEL: s_usub_n1:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_add_u32 s2, s0, 1
; CHECK-NEXT: v_mov_b32_e32 v3, s1
; CHECK-NEXT: s_addc_u32 s3, s1, 0
; CHECK-NEXT: v_mov_b32_e32 v2, s0
; CHECK-NEXT: v_mov_b32_e32 v5, s3
; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, s[2:3], v[2:3]
; CHECK-NEXT: v_mov_b32_e32 v4, s2
; CHECK-NEXT: flat_store_dwordx2 v[0:1], v[4:5]
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: s_mov_b32 s1, s0
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: ; return to shader part epilog
  %pair = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %val0, i64 -1)
  %val = extractvalue {i64, i1} %pair, 0
  %obit = extractvalue {i64, i1} %pair, 1
  %res = sext i1 %obit to i64
  store i64 %val, ptr %ptrval
  ret i64 %res
}