| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc -march=hexagon -opaque-pointers < %s | FileCheck %s |
| |
| %s.0 = type { i8 } |
| @g0 = internal global i8 0, align 1 |
| |
| define void @f0() #0 { |
| ; The autogenerated assertions below show the i8 atomicrmw being widened |
| ; to its containing 32-bit word: the stack address is rounded down to |
| ; word alignment (and(r0,#-4)), the byte offset (and(r0,#3), scaled by 8 |
| ; via asl(...,#3)) positions the #255 byte mask and the shifted addend |
| ; (lsl(#2,r2)), and the update runs in a memw_locked load-locked / |
| ; store-conditional retry loop that preserves the other three bytes. |
| ; CHECK-LABEL: f0: |
| ; CHECK: .cfi_startproc |
| ; CHECK-NEXT: // %bb.0: |
| ; CHECK-NEXT: { |
| ; CHECK-NEXT: r29 = add(r29,#-8) |
| ; CHECK-NEXT: r1 = #255 |
| ; CHECK-NEXT: } |
| ; CHECK-NEXT: { |
| ; CHECK-NEXT: r0 = add(r29,#7) |
| ; CHECK-NEXT: } |
| ; CHECK-NEXT: { |
| ; CHECK-NEXT: r2 = and(r0,#3) |
| ; CHECK-NEXT: r0 = and(r0,#-4) |
| ; CHECK-NEXT: } |
| ; CHECK-NEXT: { |
| ; CHECK-NEXT: r2 = asl(r2,#3) |
| ; CHECK-NEXT: } |
| ; CHECK-NEXT: { |
| ; CHECK-NEXT: r1 = asl(r1,r2) |
| ; CHECK-NEXT: r2 = lsl(#2,r2) |
| ; CHECK-NEXT: } |
| ; CHECK-NEXT: { |
| ; CHECK-NEXT: r3 = sub(#-1,r1) |
| ; CHECK-NEXT: } |
| ; CHECK-NEXT: .p2align 4 |
| ; CHECK-NEXT: .LBB0_1: // %atomicrmw.start |
| ; CHECK-NEXT: // =>This Inner Loop Header: Depth=1 |
| ; CHECK-NEXT: { |
| ; CHECK-NEXT: r4 = memw_locked(r0) |
| ; CHECK-NEXT: } |
| ; CHECK-NEXT: { |
| ; CHECK-NEXT: r5 = and(r4,r3) |
| ; CHECK-NEXT: r4 = add(r4,r2) |
| ; CHECK-NEXT: } |
| ; CHECK-NEXT: { |
| ; CHECK-NEXT: r5 |= and(r4,r1) |
| ; CHECK-NEXT: } |
| ; CHECK-NEXT: { |
| ; CHECK-NEXT: memw_locked(r0,p0) = r5 |
| ; CHECK-NEXT: } |
| ; CHECK-NEXT: { |
| ; CHECK-NEXT: if (!p0) jump:nt .LBB0_1 |
| ; CHECK-NEXT: } |
| ; CHECK-NEXT: // %bb.2: // %atomicrmw.end |
| ; CHECK-NEXT: { |
| ; CHECK-NEXT: r29 = add(r29,#8) |
| ; CHECK-NEXT: jumpr r31 |
| ; CHECK-NEXT: } |
| ; Atomic add of 2 to the i8 field of a stack-allocated %s.0. The result |
| ; of the atomicrmw is intentionally unused; only the lowering is tested. |
| %v0 = alloca %s.0 |
| %v1 = getelementptr %s.0, %s.0* %v0, i32 0, i32 0 |
| atomicrmw add i8* %v1, i8 2 monotonic |
| ret void |
| } |
| |
| define void @f1() #0 { |
| ; The autogenerated assertions below show the i8 cmpxchg widened to the |
| ; containing 32-bit word of @g0: the byte offset (and(r2,#3), scaled by |
| ; 8) selects the shift for the #255 mask and the new value (lsl(#1,r4)); |
| ; the loaded word is shifted down (lsr) and the expected value 0 is |
| ; tested with !bitsclr against the #255 mask, exiting early on mismatch. |
| ; On match, the new byte is merged into the word and committed with a |
| ; memw_locked load-locked / store-conditional retry loop. |
| ; CHECK-LABEL: f1: |
| ; CHECK: .cfi_startproc |
| ; CHECK-NEXT: // %bb.0: // %entry |
| ; CHECK-NEXT: { |
| ; CHECK-NEXT: r2 = ##g0 |
| ; CHECK-NEXT: r0 = #255 |
| ; CHECK-NEXT: } |
| ; CHECK-NEXT: { |
| ; CHECK-NEXT: r1 = and(r2,#3) |
| ; CHECK-NEXT: } |
| ; CHECK-NEXT: { |
| ; CHECK-NEXT: r1 = asl(r1,#3) |
| ; CHECK-NEXT: } |
| ; CHECK-NEXT: { |
| ; CHECK-NEXT: r4 = r1 |
| ; CHECK-NEXT: } |
| ; CHECK-NEXT: { |
| ; CHECK-NEXT: r4 = insert(r2,#2,#3) |
| ; CHECK-NEXT: r2 = and(r2,#-4) |
| ; CHECK-NEXT: } |
| ; CHECK-NEXT: { |
| ; CHECK-NEXT: r3 = lsl(#1,r4) |
| ; CHECK-NEXT: r4 = asl(r0,r4) |
| ; CHECK-NEXT: } |
| ; CHECK-NEXT: .p2align 4 |
| ; CHECK-NEXT: .LBB1_1: // %cmpxchg.start |
| ; CHECK-NEXT: // =>This Inner Loop Header: Depth=1 |
| ; CHECK-NEXT: { |
| ; CHECK-NEXT: r5 = memw_locked(r2) |
| ; CHECK-NEXT: } |
| ; CHECK-NEXT: { |
| ; CHECK-NEXT: r6 = lsr(r5,r1) |
| ; CHECK-NEXT: } |
| ; CHECK-NEXT: { |
| ; CHECK-NEXT: p0 = !bitsclr(r6,r0) |
| ; CHECK-NEXT: if (p0.new) jumpr:nt r31 |
| ; CHECK-NEXT: } |
| ; CHECK-NEXT: .LBB1_2: // %cmpxchg.trystore |
| ; CHECK-NEXT: // in Loop: Header=BB1_1 Depth=1 |
| ; CHECK-NEXT: { |
| ; CHECK-NEXT: r6 = r3 |
| ; CHECK-NEXT: } |
| ; CHECK-NEXT: { |
| ; CHECK-NEXT: r6 |= and(r5,~r4) |
| ; CHECK-NEXT: } |
| ; CHECK-NEXT: { |
| ; CHECK-NEXT: memw_locked(r2,p0) = r6 |
| ; CHECK-NEXT: } |
| ; CHECK-NEXT: { |
| ; CHECK-NEXT: if (!p0) jump:nt .LBB1_1 |
| ; CHECK-NEXT: } |
| ; CHECK-NEXT: // %bb.3: // %cmpxchg.end |
| ; CHECK-NEXT: { |
| ; CHECK-NEXT: jumpr r31 |
| ; CHECK-NEXT: } |
| entry: |
| ; Volatile seq_cst cmpxchg of expected 0 -> new 1 on the i8 global. The |
| ; returned {i8, i1} pair is intentionally unused; only the lowering is |
| ; tested. |
| %v0 = cmpxchg volatile i8* @g0, i8 0, i8 1 seq_cst seq_cst |
| ret void |
| } |
| |
| |
| attributes #0 = { "target-cpu"="hexagonv66" } |
| |