| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 |
| ; Test 64-bit atomic minimum and maximum. Here we match the z10 versions, |
| ; which can't use LOCGR. |
| ; |
| ; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 | FileCheck %s |
| |
; TODO: With the AtomicExpand pass, the if-converter no longer produces conditional returns.
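
; On z10 the AtomicExpand pass expands each of these operations into a
; compare-and-swap (CSG) loop.  Illustrative sketch of the expansion (not part
; of the checked output; the min/max helper stands in for the chosen operation):
;
;   old = *src;
;   do {
;     new = min(old, b);                          // or max/umin/umax
;   } while (!CSG(src, /*expected=*/&old, new));  // reloads old on failure
;   return old;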
| |
| ; Check signed minimum. |
| define i64 @f1(i64 %dummy, ptr %src, i64 %b) { |
| ; CHECK-LABEL: f1: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: lg %r2, 0(%r3) |
| ; CHECK-NEXT: j .LBB0_2 |
| ; CHECK-NEXT: .LBB0_1: # %atomicrmw.start |
| ; CHECK-NEXT: # in Loop: Header=BB0_2 Depth=1 |
| ; CHECK-NEXT: csg %r2, %r0, 0(%r3) |
| ; CHECK-NEXT: je .LBB0_4 |
| ; CHECK-NEXT: .LBB0_2: # %atomicrmw.start |
| ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 |
| ; CHECK-NEXT: lgr %r0, %r2 |
| ; CHECK-NEXT: cgrjle %r2, %r4, .LBB0_1 |
| ; CHECK-NEXT: # %bb.3: # %atomicrmw.start |
| ; CHECK-NEXT: # in Loop: Header=BB0_2 Depth=1 |
| ; CHECK-NEXT: lgr %r0, %r4 |
| ; CHECK-NEXT: j .LBB0_1 |
| ; CHECK-NEXT: .LBB0_4: # %atomicrmw.end |
| ; CHECK-NEXT: br %r14 |
| %res = atomicrmw min ptr %src, i64 %b seq_cst |
| ret i64 %res |
| } |
| |
| ; Check signed maximum. |
| define i64 @f2(i64 %dummy, ptr %src, i64 %b) { |
| ; CHECK-LABEL: f2: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: lg %r2, 0(%r3) |
| ; CHECK-NEXT: j .LBB1_2 |
| ; CHECK-NEXT: .LBB1_1: # %atomicrmw.start |
| ; CHECK-NEXT: # in Loop: Header=BB1_2 Depth=1 |
| ; CHECK-NEXT: csg %r2, %r0, 0(%r3) |
| ; CHECK-NEXT: je .LBB1_4 |
| ; CHECK-NEXT: .LBB1_2: # %atomicrmw.start |
| ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 |
| ; CHECK-NEXT: lgr %r0, %r2 |
| ; CHECK-NEXT: cgrjh %r2, %r4, .LBB1_1 |
| ; CHECK-NEXT: # %bb.3: # %atomicrmw.start |
| ; CHECK-NEXT: # in Loop: Header=BB1_2 Depth=1 |
| ; CHECK-NEXT: lgr %r0, %r4 |
| ; CHECK-NEXT: j .LBB1_1 |
| ; CHECK-NEXT: .LBB1_4: # %atomicrmw.end |
| ; CHECK-NEXT: br %r14 |
| %res = atomicrmw max ptr %src, i64 %b seq_cst |
| ret i64 %res |
| } |
| |
| ; Check unsigned minimum. |
| define i64 @f3(i64 %dummy, ptr %src, i64 %b) { |
| ; CHECK-LABEL: f3: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: lg %r2, 0(%r3) |
| ; CHECK-NEXT: j .LBB2_2 |
| ; CHECK-NEXT: .LBB2_1: # %atomicrmw.start |
| ; CHECK-NEXT: # in Loop: Header=BB2_2 Depth=1 |
| ; CHECK-NEXT: csg %r2, %r0, 0(%r3) |
| ; CHECK-NEXT: je .LBB2_4 |
| ; CHECK-NEXT: .LBB2_2: # %atomicrmw.start |
| ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 |
| ; CHECK-NEXT: lgr %r0, %r2 |
| ; CHECK-NEXT: clgrjle %r2, %r4, .LBB2_1 |
| ; CHECK-NEXT: # %bb.3: # %atomicrmw.start |
| ; CHECK-NEXT: # in Loop: Header=BB2_2 Depth=1 |
| ; CHECK-NEXT: lgr %r0, %r4 |
| ; CHECK-NEXT: j .LBB2_1 |
| ; CHECK-NEXT: .LBB2_4: # %atomicrmw.end |
| ; CHECK-NEXT: br %r14 |
| %res = atomicrmw umin ptr %src, i64 %b seq_cst |
| ret i64 %res |
| } |
| |
| ; Check unsigned maximum. |
| define i64 @f4(i64 %dummy, ptr %src, i64 %b) { |
| ; CHECK-LABEL: f4: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: lg %r2, 0(%r3) |
| ; CHECK-NEXT: j .LBB3_2 |
| ; CHECK-NEXT: .LBB3_1: # %atomicrmw.start |
| ; CHECK-NEXT: # in Loop: Header=BB3_2 Depth=1 |
| ; CHECK-NEXT: csg %r2, %r0, 0(%r3) |
| ; CHECK-NEXT: je .LBB3_4 |
| ; CHECK-NEXT: .LBB3_2: # %atomicrmw.start |
| ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 |
| ; CHECK-NEXT: lgr %r0, %r2 |
| ; CHECK-NEXT: clgrjh %r2, %r4, .LBB3_1 |
| ; CHECK-NEXT: # %bb.3: # %atomicrmw.start |
| ; CHECK-NEXT: # in Loop: Header=BB3_2 Depth=1 |
| ; CHECK-NEXT: lgr %r0, %r4 |
| ; CHECK-NEXT: j .LBB3_1 |
| ; CHECK-NEXT: .LBB3_4: # %atomicrmw.end |
| ; CHECK-NEXT: br %r14 |
| %res = atomicrmw umax ptr %src, i64 %b seq_cst |
| ret i64 %res |
| } |
| |
| ; Check the high end of the aligned CSG range. |
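; (524280 = 8 * 65535 is the largest doubleword-aligned offset that fits CSG's
; signed 20-bit displacement.)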
| define i64 @f5(i64 %dummy, ptr %src, i64 %b) { |
| ; CHECK-LABEL: f5: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: lg %r2, 524280(%r3) |
| ; CHECK-NEXT: j .LBB4_2 |
| ; CHECK-NEXT: .LBB4_1: # %atomicrmw.start |
| ; CHECK-NEXT: # in Loop: Header=BB4_2 Depth=1 |
| ; CHECK-NEXT: csg %r2, %r0, 524280(%r3) |
| ; CHECK-NEXT: je .LBB4_4 |
| ; CHECK-NEXT: .LBB4_2: # %atomicrmw.start |
| ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 |
| ; CHECK-NEXT: lgr %r0, %r2 |
| ; CHECK-NEXT: cgrjle %r2, %r4, .LBB4_1 |
| ; CHECK-NEXT: # %bb.3: # %atomicrmw.start |
| ; CHECK-NEXT: # in Loop: Header=BB4_2 Depth=1 |
| ; CHECK-NEXT: lgr %r0, %r4 |
| ; CHECK-NEXT: j .LBB4_1 |
| ; CHECK-NEXT: .LBB4_4: # %atomicrmw.end |
| ; CHECK-NEXT: br %r14 |
| %ptr = getelementptr i64, ptr %src, i64 65535 |
| %res = atomicrmw min ptr %ptr, i64 %b seq_cst |
| ret i64 %res |
| } |
| |
| ; Check the next doubleword up, which requires separate address logic. |
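; (524288 no longer fits the signed 20-bit displacement, so the base is first
; adjusted with AGFI.)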
| define i64 @f6(i64 %dummy, ptr %src, i64 %b) { |
| ; CHECK-LABEL: f6: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: agfi %r3, 524288 |
| ; CHECK-NEXT: lg %r2, 0(%r3) |
| ; CHECK-NEXT: j .LBB5_2 |
| ; CHECK-NEXT: .LBB5_1: # %atomicrmw.start |
| ; CHECK-NEXT: # in Loop: Header=BB5_2 Depth=1 |
| ; CHECK-NEXT: csg %r2, %r0, 0(%r3) |
| ; CHECK-NEXT: je .LBB5_4 |
| ; CHECK-NEXT: .LBB5_2: # %atomicrmw.start |
| ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 |
| ; CHECK-NEXT: lgr %r0, %r2 |
| ; CHECK-NEXT: cgrjle %r2, %r4, .LBB5_1 |
| ; CHECK-NEXT: # %bb.3: # %atomicrmw.start |
| ; CHECK-NEXT: # in Loop: Header=BB5_2 Depth=1 |
| ; CHECK-NEXT: lgr %r0, %r4 |
| ; CHECK-NEXT: j .LBB5_1 |
| ; CHECK-NEXT: .LBB5_4: # %atomicrmw.end |
| ; CHECK-NEXT: br %r14 |
| %ptr = getelementptr i64, ptr %src, i64 65536 |
| %res = atomicrmw min ptr %ptr, i64 %b seq_cst |
| ret i64 %res |
| } |
| |
| ; Check the low end of the CSG range. |
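; (-524288 is the most negative displacement CSG can encode.)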
| define i64 @f7(i64 %dummy, ptr %src, i64 %b) { |
| ; CHECK-LABEL: f7: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: lg %r2, -524288(%r3) |
| ; CHECK-NEXT: j .LBB6_2 |
| ; CHECK-NEXT: .LBB6_1: # %atomicrmw.start |
| ; CHECK-NEXT: # in Loop: Header=BB6_2 Depth=1 |
| ; CHECK-NEXT: csg %r2, %r0, -524288(%r3) |
| ; CHECK-NEXT: je .LBB6_4 |
| ; CHECK-NEXT: .LBB6_2: # %atomicrmw.start |
| ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 |
| ; CHECK-NEXT: lgr %r0, %r2 |
| ; CHECK-NEXT: cgrjle %r2, %r4, .LBB6_1 |
| ; CHECK-NEXT: # %bb.3: # %atomicrmw.start |
| ; CHECK-NEXT: # in Loop: Header=BB6_2 Depth=1 |
| ; CHECK-NEXT: lgr %r0, %r4 |
| ; CHECK-NEXT: j .LBB6_1 |
| ; CHECK-NEXT: .LBB6_4: # %atomicrmw.end |
| ; CHECK-NEXT: br %r14 |
| %ptr = getelementptr i64, ptr %src, i64 -65536 |
| %res = atomicrmw min ptr %ptr, i64 %b seq_cst |
| ret i64 %res |
| } |
| |
| ; Check the next doubleword down, which requires separate address logic. |
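; (-524296 is out of displacement range, so the base is again adjusted with AGFI.)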
| define i64 @f8(i64 %dummy, ptr %src, i64 %b) { |
| ; CHECK-LABEL: f8: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: agfi %r3, -524296 |
| ; CHECK-NEXT: lg %r2, 0(%r3) |
| ; CHECK-NEXT: j .LBB7_2 |
| ; CHECK-NEXT: .LBB7_1: # %atomicrmw.start |
| ; CHECK-NEXT: # in Loop: Header=BB7_2 Depth=1 |
| ; CHECK-NEXT: csg %r2, %r0, 0(%r3) |
| ; CHECK-NEXT: je .LBB7_4 |
| ; CHECK-NEXT: .LBB7_2: # %atomicrmw.start |
| ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 |
| ; CHECK-NEXT: lgr %r0, %r2 |
| ; CHECK-NEXT: cgrjle %r2, %r4, .LBB7_1 |
| ; CHECK-NEXT: # %bb.3: # %atomicrmw.start |
| ; CHECK-NEXT: # in Loop: Header=BB7_2 Depth=1 |
| ; CHECK-NEXT: lgr %r0, %r4 |
| ; CHECK-NEXT: j .LBB7_1 |
| ; CHECK-NEXT: .LBB7_4: # %atomicrmw.end |
| ; CHECK-NEXT: br %r14 |
| %ptr = getelementptr i64, ptr %src, i64 -65537 |
| %res = atomicrmw min ptr %ptr, i64 %b seq_cst |
| ret i64 %res |
| } |
| |
| ; Check that indexed addresses are not allowed. |
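; CSG only takes a base and displacement, so the index must first be folded
; into the base (AGR).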
| define i64 @f9(i64 %dummy, i64 %base, i64 %index, i64 %b) { |
| ; CHECK-LABEL: f9: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: lg %r2, 0(%r4,%r3) |
| ; CHECK-NEXT: agr %r3, %r4 |
| ; CHECK-NEXT: j .LBB8_2 |
| ; CHECK-NEXT: .LBB8_1: # %atomicrmw.start |
| ; CHECK-NEXT: # in Loop: Header=BB8_2 Depth=1 |
| ; CHECK-NEXT: csg %r2, %r0, 0(%r3) |
| ; CHECK-NEXT: je .LBB8_4 |
| ; CHECK-NEXT: .LBB8_2: # %atomicrmw.start |
| ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 |
| ; CHECK-NEXT: lgr %r0, %r2 |
| ; CHECK-NEXT: cgrjle %r2, %r5, .LBB8_1 |
| ; CHECK-NEXT: # %bb.3: # %atomicrmw.start |
| ; CHECK-NEXT: # in Loop: Header=BB8_2 Depth=1 |
| ; CHECK-NEXT: lgr %r0, %r5 |
| ; CHECK-NEXT: j .LBB8_1 |
| ; CHECK-NEXT: .LBB8_4: # %atomicrmw.end |
| ; CHECK-NEXT: br %r14 |
| %add = add i64 %base, %index |
| %ptr = inttoptr i64 %add to ptr |
| %res = atomicrmw min ptr %ptr, i64 %b seq_cst |
| ret i64 %res |
| } |
| |
| ; Check that constants are handled. |
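; With a constant operand the comparison and the replacement value can use
; immediates (CGIJ and LGHI).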
| define i64 @f10(i64 %dummy, ptr %ptr) { |
| ; CHECK-LABEL: f10: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: lg %r2, 0(%r3) |
| ; CHECK-NEXT: j .LBB9_2 |
| ; CHECK-NEXT: .LBB9_1: # %atomicrmw.start |
| ; CHECK-NEXT: # in Loop: Header=BB9_2 Depth=1 |
| ; CHECK-NEXT: csg %r2, %r0, 0(%r3) |
| ; CHECK-NEXT: je .LBB9_4 |
| ; CHECK-NEXT: .LBB9_2: # %atomicrmw.start |
| ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 |
| ; CHECK-NEXT: lgr %r0, %r2 |
| ; CHECK-NEXT: cgijl %r2, 43, .LBB9_1 |
| ; CHECK-NEXT: # %bb.3: # %atomicrmw.start |
| ; CHECK-NEXT: # in Loop: Header=BB9_2 Depth=1 |
| ; CHECK-NEXT: lghi %r0, 42 |
| ; CHECK-NEXT: j .LBB9_1 |
| ; CHECK-NEXT: .LBB9_4: # %atomicrmw.end |
| ; CHECK-NEXT: br %r14 |
| %res = atomicrmw min ptr %ptr, i64 42 seq_cst |
| ret i64 %res |
| } |