; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=hexagon < %s | FileCheck %s
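
; Check lowering of atomicrmw usub_cond and usub_sat for i8, i16, i32 and i64.
; Sub-word (i8/i16) operations are expanded to a 32-bit load-locked /
; store-conditional loop (memw_locked) that shifts and masks the narrow value
; within its containing word; i32 and i64 operate directly on the locked word
; or doubleword.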

define i8 @atomicrmw_usub_cond_i8(ptr %ptr, i8 %val) {
; CHECK-LABEL: atomicrmw_usub_cond_i8:
; CHECK: .cfi_startproc
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: {
; CHECK-NEXT: r0 = and(#24,asl(r0,#3))
; CHECK-NEXT: r3 = and(r0,#-4)
; CHECK-NEXT: r2 = #255
; CHECK-NEXT: r4 = and(r1,#255)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: r2 = asl(r2,r0)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: r5 = sub(#-1,r2)
; CHECK-NEXT: }
; CHECK-NEXT: .p2align 4
; CHECK-NEXT: .LBB0_1: // %atomicrmw.start
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: {
; CHECK-NEXT: r6 = memw_locked(r3)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: r2 = lsr(r6,r0)
; CHECK-NEXT: r6 = and(r6,r5)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: r7 = and(r2,#255)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: p0 = cmp.gtu(r4,r7)
; CHECK-NEXT: if (p0.new) r7 = add(r2,#0)
; CHECK-NEXT: if (!p0.new) r7 = sub(r2,r1)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: r7 = and(r7,#255)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: r6 |= asl(r7,r0)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: memw_locked(r3,p0) = r6
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: if (!p0) jump:nt .LBB0_1
; CHECK-NEXT: }
; CHECK-NEXT: // %bb.2: // %atomicrmw.end
; CHECK-NEXT: {
; CHECK-NEXT: r0 = r2
; CHECK-NEXT: jumpr r31
; CHECK-NEXT: }
  %result = atomicrmw usub_cond ptr %ptr, i8 %val seq_cst
  ret i8 %result
}

define i16 @atomicrmw_usub_cond_i16(ptr %ptr, i16 %val) {
; CHECK-LABEL: atomicrmw_usub_cond_i16:
; CHECK: .cfi_startproc
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: {
; CHECK-NEXT: r0 = and(#24,asl(r0,#3))
; CHECK-NEXT: r3 = and(r0,#-4)
; CHECK-NEXT: r2 = ##65535
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: r2 = asl(r2,r0)
; CHECK-NEXT: r4 = zxth(r1)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: r5 = sub(#-1,r2)
; CHECK-NEXT: }
; CHECK-NEXT: .p2align 4
; CHECK-NEXT: .LBB1_1: // %atomicrmw.start
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: {
; CHECK-NEXT: r6 = memw_locked(r3)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: r2 = lsr(r6,r0)
; CHECK-NEXT: r6 = and(r6,r5)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: r7 = zxth(r2)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: p0 = cmp.gtu(r4,r7)
; CHECK-NEXT: if (p0.new) r7 = add(r2,#0)
; CHECK-NEXT: if (!p0.new) r7 = sub(r2,r1)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: r7 = zxth(r7)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: r6 |= asl(r7,r0)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: memw_locked(r3,p0) = r6
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: if (!p0) jump:nt .LBB1_1
; CHECK-NEXT: }
; CHECK-NEXT: // %bb.2: // %atomicrmw.end
; CHECK-NEXT: {
; CHECK-NEXT: r0 = r2
; CHECK-NEXT: jumpr r31
; CHECK-NEXT: }
  %result = atomicrmw usub_cond ptr %ptr, i16 %val seq_cst
  ret i16 %result
}

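; For i32, usub_cond works directly on the locked word: cmp.gtu(val,old)
; selects between the unchanged old value and old-val with predicated
; add/sub in a single packet.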
define i32 @atomicrmw_usub_cond_i32(ptr %ptr, i32 %val) {
; CHECK-LABEL: atomicrmw_usub_cond_i32:
; CHECK: .cfi_startproc
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: .p2align 4
; CHECK-NEXT: .LBB2_1: // %atomicrmw.start
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: {
; CHECK-NEXT: r2 = memw_locked(r0)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: p0 = cmp.gtu(r1,r2)
; CHECK-NEXT: if (p0.new) r3 = add(r2,#0)
; CHECK-NEXT: if (!p0.new) r3 = sub(r2,r1)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: memw_locked(r0,p0) = r3
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: if (!p0) jump:nt .LBB2_1
; CHECK-NEXT: }
; CHECK-NEXT: // %bb.2: // %atomicrmw.end
; CHECK-NEXT: {
; CHECK-NEXT: r0 = r2
; CHECK-NEXT: jumpr r31
; CHECK-NEXT: }
  %result = atomicrmw usub_cond ptr %ptr, i32 %val seq_cst
  ret i32 %result
}

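; For i64, the compare is done on the register pair and the result is
; selected with a pair of mux instructions instead of predicated add/sub.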
define i64 @atomicrmw_usub_cond_i64(ptr %ptr, i64 %val) {
; CHECK-LABEL: atomicrmw_usub_cond_i64:
; CHECK: .cfi_startproc
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: .p2align 4
; CHECK-NEXT: .LBB3_1: // %atomicrmw.start
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: {
; CHECK-NEXT: r5:4 = memd_locked(r0)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: p0 = cmp.gtu(r3:2,r5:4)
; CHECK-NEXT: r7:6 = sub(r5:4,r3:2)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: r8 = mux(p0,r4,r6)
; CHECK-NEXT: r9 = mux(p0,r5,r7)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: memd_locked(r0,p0) = r9:8
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: if (!p0) jump:nt .LBB3_1
; CHECK-NEXT: }
; CHECK-NEXT: // %bb.2: // %atomicrmw.end
; CHECK-NEXT: {
; CHECK-NEXT: r1:0 = combine(r5,r4)
; CHECK-NEXT: jumpr r31
; CHECK-NEXT: }
  %result = atomicrmw usub_cond ptr %ptr, i64 %val seq_cst
  ret i64 %result
}

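; usub_sat is lowered as maxu(old,val) - val, which produces old-val when
; old >= val and clamps to zero otherwise.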
define i8 @atomicrmw_usub_sat_i8(ptr %ptr, i8 %val) {
; CHECK-LABEL: atomicrmw_usub_sat_i8:
; CHECK: .cfi_startproc
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: {
; CHECK-NEXT: r0 = and(#24,asl(r0,#3))
; CHECK-NEXT: r2 = and(r0,#-4)
; CHECK-NEXT: r3 = #255
; CHECK-NEXT: r1 = and(r1,#255)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: r3 = asl(r3,r0)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: r3 = sub(#-1,r3)
; CHECK-NEXT: }
; CHECK-NEXT: .p2align 4
; CHECK-NEXT: .LBB4_1: // %atomicrmw.start
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: {
; CHECK-NEXT: r5 = #255
; CHECK-NEXT: r4 = memw_locked(r2)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: r5 &= lsr(r4,r0)
; CHECK-NEXT: r6 = and(r4,r3)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: r5 = maxu(r5,r1)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: r5 = sub(r5,r1)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: r6 |= asl(r5,r0)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: memw_locked(r2,p0) = r6
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: if (!p0) jump:nt .LBB4_1
; CHECK-NEXT: }
; CHECK-NEXT: // %bb.2: // %atomicrmw.end
; CHECK-NEXT: {
; CHECK-NEXT: r0 = lsr(r4,r0)
; CHECK-NEXT: jumpr r31
; CHECK-NEXT: }
  %result = atomicrmw usub_sat ptr %ptr, i8 %val seq_cst
  ret i8 %result
}

define i16 @atomicrmw_usub_sat_i16(ptr %ptr, i16 %val) {
; CHECK-LABEL: atomicrmw_usub_sat_i16:
; CHECK: .cfi_startproc
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: {
; CHECK-NEXT: r0 = and(#24,asl(r0,#3))
; CHECK-NEXT: r2 = and(r0,#-4)
; CHECK-NEXT: r3 = ##65535
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: r3 = asl(r3,r0)
; CHECK-NEXT: r1 = zxth(r1)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: r3 = sub(#-1,r3)
; CHECK-NEXT: }
; CHECK-NEXT: .p2align 4
; CHECK-NEXT: .LBB5_1: // %atomicrmw.start
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: {
; CHECK-NEXT: r5 = ##65535
; CHECK-NEXT: r4 = memw_locked(r2)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: r5 &= lsr(r4,r0)
; CHECK-NEXT: r6 = and(r4,r3)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: r5 = maxu(r5,r1)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: r5 = sub(r5,r1)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: r6 |= asl(r5,r0)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: memw_locked(r2,p0) = r6
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: if (!p0) jump:nt .LBB5_1
; CHECK-NEXT: }
; CHECK-NEXT: // %bb.2: // %atomicrmw.end
; CHECK-NEXT: {
; CHECK-NEXT: r0 = lsr(r4,r0)
; CHECK-NEXT: jumpr r31
; CHECK-NEXT: }
  %result = atomicrmw usub_sat ptr %ptr, i16 %val seq_cst
  ret i16 %result
}

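; Word-sized and doubleword usub_sat use the same maxu/sub sequence directly
; on the locked value, with no shift/mask loop.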
define i32 @atomicrmw_usub_sat_i32(ptr %ptr, i32 %val) {
; CHECK-LABEL: atomicrmw_usub_sat_i32:
; CHECK: .cfi_startproc
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: .p2align 4
; CHECK-NEXT: .LBB6_1: // %atomicrmw.start
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: {
; CHECK-NEXT: r2 = memw_locked(r0)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: r3 = maxu(r2,r1)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: r3 = sub(r3,r1)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: memw_locked(r0,p0) = r3
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: if (!p0) jump:nt .LBB6_1
; CHECK-NEXT: }
; CHECK-NEXT: // %bb.2: // %atomicrmw.end
; CHECK-NEXT: {
; CHECK-NEXT: r0 = r2
; CHECK-NEXT: jumpr r31
; CHECK-NEXT: }
  %result = atomicrmw usub_sat ptr %ptr, i32 %val seq_cst
  ret i32 %result
}

define i64 @atomicrmw_usub_sat_i64(ptr %ptr, i64 %val) {
; CHECK-LABEL: atomicrmw_usub_sat_i64:
; CHECK: .cfi_startproc
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: .p2align 4
; CHECK-NEXT: .LBB7_1: // %atomicrmw.start
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: {
; CHECK-NEXT: r5:4 = memd_locked(r0)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: r7:6 = maxu(r5:4,r3:2)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: r7:6 = sub(r7:6,r3:2)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: memd_locked(r0,p0) = r7:6
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: if (!p0) jump:nt .LBB7_1
; CHECK-NEXT: }
; CHECK-NEXT: // %bb.2: // %atomicrmw.end
; CHECK-NEXT: {
; CHECK-NEXT: r1:0 = combine(r5,r4)
; CHECK-NEXT: jumpr r31
; CHECK-NEXT: }
  %result = atomicrmw usub_sat ptr %ptr, i64 %val seq_cst
  ret i64 %result
}