| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s |
| |
define i64 @atomic_shl1_xor_64_gpr_val(ptr %v, i64 %c) nounwind {
; CHECK-LABEL: atomic_shl1_xor_64_gpr_val:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rsi, %rcx
; CHECK-NEXT: andl $63, %ecx
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: lock btcq %rcx, (%rdi)
; CHECK-NEXT: setb %al
; CHECK-NEXT: # kill: def $cl killed $cl killed $rcx
; CHECK-NEXT: shlq %cl, %rax
; CHECK-NEXT: retq
entry:
; Single-bit xor: (1 << c) with nuw is recognized as a bit-complement, so the
; atomicrmw lowers to "lock btcq" and the returned masked old value is rebuilt
; from CF via setb + shl instead of a cmpxchg loop.
%shl = shl nuw i64 1, %c
%0 = atomicrmw xor ptr %v, i64 %shl monotonic, align 8
%and = and i64 %shl, %0
ret i64 %and
}
| |
define i64 @atomic_shl2_xor_64_gpr_val(ptr %v, i64 %c) nounwind {
; CHECK-LABEL: atomic_shl2_xor_64_gpr_val:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rsi, %rcx
; CHECK-NEXT: movl $2, %edx
; CHECK-NEXT: # kill: def $cl killed $cl killed $rcx
; CHECK-NEXT: shlq %cl, %rdx
; CHECK-NEXT: movq (%rdi), %rax
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB1_1: # %atomicrmw.start
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: movq %rax, %rcx
; CHECK-NEXT: xorq %rdx, %rcx
; CHECK-NEXT: lock cmpxchgq %rcx, (%rdi)
; CHECK-NEXT: jne .LBB1_1
; CHECK-NEXT: # %bb.2: # %atomicrmw.end
; CHECK-NEXT: andq %rdx, %rax
; CHECK-NEXT: retq
entry:
; Negative test: 2 << c is not the "1 << c" single-bit pattern, so no btc fold;
; the atomicrmw stays a cmpxchg loop.
%shl = shl i64 2, %c
%0 = atomicrmw xor ptr %v, i64 %shl monotonic, align 8
%and = and i64 %0, %shl
ret i64 %and
}
| |
define i64 @atomic_shl1_neq_xor_64_gpr_val(ptr %v, i64 %c) nounwind {
; CHECK-LABEL: atomic_shl1_neq_xor_64_gpr_val:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rsi, %rcx
; CHECK-NEXT: movl $1, %edx
; CHECK-NEXT: shlq %cl, %rdx
; CHECK-NEXT: movq (%rdi), %rax
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB2_1: # %atomicrmw.start
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: movq %rax, %rsi
; CHECK-NEXT: xorq %rdx, %rsi
; CHECK-NEXT: lock cmpxchgq %rsi, (%rdi)
; CHECK-NEXT: jne .LBB2_1
; CHECK-NEXT: # %bb.2: # %atomicrmw.end
; CHECK-NEXT: incb %cl
; CHECK-NEXT: movl $1, %edx
; CHECK-NEXT: # kill: def $cl killed $cl killed $rcx
; CHECK-NEXT: shlq %cl, %rdx
; CHECK-NEXT: retq
entry:
; Negative test: the bit extracted afterwards (c + 1) differs from the bit
; xor'ed (c), so the bit-test fold does not apply and a cmpxchg loop remains.
%shl = shl nuw i64 1, %c
%0 = atomicrmw xor ptr %v, i64 %shl monotonic, align 8
%add = add i64 %c, 1
%shl1 = shl nuw i64 1, %add
%and = and i64 %0, %shl1
ret i64 %and
}
| |
define i64 @atomic_shl1_small_mask_xor_64_gpr_val(ptr %v, i64 %c) nounwind {
; CHECK-LABEL: atomic_shl1_small_mask_xor_64_gpr_val:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rsi, %rcx
; CHECK-NEXT: andl $31, %ecx
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: lock btcq %rcx, (%rdi)
; CHECK-NEXT: setb %al
; CHECK-NEXT: # kill: def $cl killed $cl killed $rcx
; CHECK-NEXT: shlq %cl, %rax
; CHECK-NEXT: retq
entry:
; Shift amount is pre-masked to [0, 31], tighter than the 64-bit operand needs;
; the single-bit pattern is still recognized and lowers to "lock btcq".
%rem = and i64 %c, 31
%shl = shl nuw nsw i64 1, %rem
%0 = atomicrmw xor ptr %v, i64 %shl monotonic, align 8
%and = and i64 %0, %shl
ret i64 %and
}
| |
define i64 @atomic_shl1_mask0_xor_64_gpr_val(ptr %v, i64 %c) nounwind {
; CHECK-LABEL: atomic_shl1_mask0_xor_64_gpr_val:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rsi, %rcx
; CHECK-NEXT: andl $63, %ecx
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: lock btcq %rcx, (%rdi)
; CHECK-NEXT: setb %al
; CHECK-NEXT: # kill: def $cl killed $cl killed $rcx
; CHECK-NEXT: shlq %cl, %rax
; CHECK-NEXT: retq
entry:
; The xor'ed bit uses (c & 63) while the extracted bit uses raw c; for a 64-bit
; shift these select the same bit, so the btc fold still applies.
%rem = and i64 %c, 63
%shl = shl nuw i64 1, %rem
%0 = atomicrmw xor ptr %v, i64 %shl monotonic, align 8
%shl1 = shl nuw i64 1, %c
%and = and i64 %shl1, %0
ret i64 %and
}
| |
define i64 @atomic_shl1_mask1_xor_64_gpr_val(ptr %v, i64 %c) nounwind {
; CHECK-LABEL: atomic_shl1_mask1_xor_64_gpr_val:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rsi, %rcx
; CHECK-NEXT: andl $63, %ecx
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: lock btcq %rcx, (%rdi)
; CHECK-NEXT: setb %al
; CHECK-NEXT: # kill: def $cl killed $cl killed $rcx
; CHECK-NEXT: shlq %cl, %rax
; CHECK-NEXT: retq
entry:
; Mirror of mask0: here the xor'ed bit uses raw c and the extracted bit uses
; (c & 63); equivalent modulo 64, so "lock btcq" is still generated.
%shl = shl nuw i64 1, %c
%0 = atomicrmw xor ptr %v, i64 %shl monotonic, align 8
%rem = and i64 %c, 63
%shl1 = shl nuw i64 1, %rem
%and = and i64 %0, %shl1
ret i64 %and
}
| |
define i64 @atomic_shl1_mask01_xor_64_gpr_val(ptr %v, i64 %c) nounwind {
; CHECK-LABEL: atomic_shl1_mask01_xor_64_gpr_val:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rsi, %rcx
; CHECK-NEXT: andl $63, %ecx
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: lock btcq %rcx, (%rdi)
; CHECK-NEXT: setb %al
; CHECK-NEXT: # kill: def $cl killed $cl killed $rcx
; CHECK-NEXT: shlq %cl, %rax
; CHECK-NEXT: retq
entry:
; Both the xor'ed and extracted bits use the same masked amount (c & 63), so
; the single-bit fold to "lock btcq" applies.
%rem = and i64 %c, 63
%shl = shl nuw i64 1, %rem
%0 = atomicrmw xor ptr %v, i64 %shl monotonic, align 8
%and = and i64 %0, %shl
ret i64 %and
}
| |
define i64 @atomic_blsi_xor_64_gpr_val(ptr %v, i64 %c) nounwind {
; CHECK-LABEL: atomic_blsi_xor_64_gpr_val:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rsi, %rcx
; CHECK-NEXT: negq %rcx
; CHECK-NEXT: andq %rsi, %rcx
; CHECK-NEXT: movq (%rdi), %rax
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB7_1: # %atomicrmw.start
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: movq %rax, %rdx
; CHECK-NEXT: xorq %rcx, %rdx
; CHECK-NEXT: lock cmpxchgq %rdx, (%rdi)
; CHECK-NEXT: jne .LBB7_1
; CHECK-NEXT: # %bb.2: # %atomicrmw.end
; CHECK-NEXT: andq %rcx, %rax
; CHECK-NEXT: retq
entry:
; blsi-style lowest-set-bit mask (c & -c): a single bit at runtime, but not in
; the "1 << c" form the fold matches, so a cmpxchg loop is emitted.
%sub = sub i64 0, %c
%and = and i64 %sub, %c
%0 = atomicrmw xor ptr %v, i64 %and monotonic, align 8
%and3 = and i64 %0, %and
ret i64 %and3
}
| |
define i64 @atomic_shl1_xor_64_gpr_valz(ptr %v, i64 %c) nounwind {
; CHECK-LABEL: atomic_shl1_xor_64_gpr_valz:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rsi, %rcx
; CHECK-NEXT: movl $1, %edx
; CHECK-NEXT: shlq %cl, %rdx
; CHECK-NEXT: movq (%rdi), %rax
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB8_1: # %atomicrmw.start
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: movq %rax, %rsi
; CHECK-NEXT: xorq %rdx, %rsi
; CHECK-NEXT: lock cmpxchgq %rsi, (%rdi)
; CHECK-NEXT: jne .LBB8_1
; CHECK-NEXT: # %bb.2: # %atomicrmw.end
; CHECK-NEXT: xorl %edx, %edx
; CHECK-NEXT: btq %rcx, %rax
; CHECK-NEXT: setae %dl
; CHECK-NEXT: movq %rdx, %rax
; CHECK-NEXT: retq
entry:
; "valz" variant: returns 1 when bit c of the old value was clear. The not is
; spelled "sub -1, %0"; the extraction lowers to btq + setae after the loop.
%shl = shl nuw i64 1, %c
%0 = atomicrmw xor ptr %v, i64 %shl monotonic, align 8
%1 = sub i64 -1, %0
%2 = lshr i64 %1, %c
%conv = and i64 %2, 1
ret i64 %conv
}
| |
define i64 @atomic_shl2_xor_64_gpr_valz(ptr %v, i64 %c) nounwind {
; CHECK-LABEL: atomic_shl2_xor_64_gpr_valz:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rsi, %rcx
; CHECK-NEXT: movl $2, %edx
; CHECK-NEXT: # kill: def $cl killed $cl killed $rcx
; CHECK-NEXT: shlq %cl, %rdx
; CHECK-NEXT: movq (%rdi), %rax
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB9_1: # %atomicrmw.start
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: movq %rax, %rcx
; CHECK-NEXT: xorq %rdx, %rcx
; CHECK-NEXT: lock cmpxchgq %rcx, (%rdi)
; CHECK-NEXT: jne .LBB9_1
; CHECK-NEXT: # %bb.2: # %atomicrmw.end
; CHECK-NEXT: xorl %ecx, %ecx
; CHECK-NEXT: testq %rax, %rdx
; CHECK-NEXT: sete %cl
; CHECK-NEXT: movq %rcx, %rax
; CHECK-NEXT: retq
entry:
; Negative test (2 << c, not single-bit): cmpxchg loop, then the zero check
; lowers to testq + sete rather than a bit test.
%shl = shl i64 2, %c
%0 = atomicrmw xor ptr %v, i64 %shl monotonic, align 8
%and = and i64 %shl, %0
%tobool.not = icmp eq i64 %and, 0
%conv = zext i1 %tobool.not to i64
ret i64 %conv
}
| |
define i64 @atomic_shl1_neq_xor_64_gpr_valz(ptr %v, i64 %c) nounwind {
; CHECK-LABEL: atomic_shl1_neq_xor_64_gpr_valz:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rsi, %rcx
; CHECK-NEXT: movl $1, %edx
; CHECK-NEXT: shlq %cl, %rdx
; CHECK-NEXT: movq (%rdi), %rax
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB10_1: # %atomicrmw.start
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: movq %rax, %rsi
; CHECK-NEXT: xorq %rdx, %rsi
; CHECK-NEXT: lock cmpxchgq %rsi, (%rdi)
; CHECK-NEXT: jne .LBB10_1
; CHECK-NEXT: # %bb.2: # %atomicrmw.end
; CHECK-NEXT: incb %cl
; CHECK-NEXT: movzbl %cl, %edx
; CHECK-NEXT: xorl %ecx, %ecx
; CHECK-NEXT: btq %rdx, %rax
; CHECK-NEXT: setae %cl
; CHECK-NEXT: movq %rcx, %rax
; CHECK-NEXT: retq
entry:
; Tested bit (c + 1) differs from the xor'ed bit (c): cmpxchg loop, and the
; inverted-bit extraction becomes btq + setae on the incremented count.
%shl = shl nuw i64 1, %c
%0 = atomicrmw xor ptr %v, i64 %shl monotonic, align 8
%add = add i64 %c, 1
%1 = xor i64 %0, -1
%2 = lshr i64 %1, %add
%conv = and i64 %2, 1
ret i64 %conv
}
| |
define i64 @atomic_shl1_small_mask_xor_64_gpr_valz(ptr %v, i64 %c) nounwind {
; CHECK-LABEL: atomic_shl1_small_mask_xor_64_gpr_valz:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rsi, %rcx
; CHECK-NEXT: andl $31, %ecx
; CHECK-NEXT: movl $1, %edx
; CHECK-NEXT: shlq %cl, %rdx
; CHECK-NEXT: movq (%rdi), %rax
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB11_1: # %atomicrmw.start
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: movq %rax, %rsi
; CHECK-NEXT: xorq %rdx, %rsi
; CHECK-NEXT: lock cmpxchgq %rsi, (%rdi)
; CHECK-NEXT: jne .LBB11_1
; CHECK-NEXT: # %bb.2: # %atomicrmw.end
; CHECK-NEXT: xorl %edx, %edx
; CHECK-NEXT: btl %ecx, %eax
; CHECK-NEXT: setae %dl
; CHECK-NEXT: movq %rdx, %rax
; CHECK-NEXT: retq
entry:
; Shift amount masked to [0, 31]; the bit test after the loop can use the
; 32-bit btl form since the index fits in 5 bits.
%rem = and i64 %c, 31
%shl = shl nuw nsw i64 1, %rem
%0 = atomicrmw xor ptr %v, i64 %shl monotonic, align 8
%1 = xor i64 %0, -1
%2 = lshr i64 %1, %rem
%conv = and i64 %2, 1
ret i64 %conv
}
| |
define i64 @atomic_shl1_mask0_xor_64_gpr_valz(ptr %v, i64 %c) nounwind {
; CHECK-LABEL: atomic_shl1_mask0_xor_64_gpr_valz:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rsi, %rcx
; CHECK-NEXT: movl $1, %edx
; CHECK-NEXT: shlq %cl, %rdx
; CHECK-NEXT: movq (%rdi), %rax
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB12_1: # %atomicrmw.start
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: movq %rax, %rsi
; CHECK-NEXT: xorq %rdx, %rsi
; CHECK-NEXT: lock cmpxchgq %rsi, (%rdi)
; CHECK-NEXT: jne .LBB12_1
; CHECK-NEXT: # %bb.2: # %atomicrmw.end
; CHECK-NEXT: xorl %edx, %edx
; CHECK-NEXT: btq %rcx, %rax
; CHECK-NEXT: setae %dl
; CHECK-NEXT: movq %rdx, %rax
; CHECK-NEXT: retq
entry:
; xor'ed bit uses (c & 63), extraction uses raw c (same bit mod 64); this
; "valz" form currently keeps the cmpxchg loop with btq + setae afterwards.
%rem = and i64 %c, 63
%shl = shl nuw i64 1, %rem
%0 = atomicrmw xor ptr %v, i64 %shl monotonic, align 8
%1 = xor i64 %0, -1
%2 = lshr i64 %1, %c
%conv = and i64 %2, 1
ret i64 %conv
}
| |
define i64 @atomic_shl1_mask1_xor_64_gpr_valz(ptr %v, i64 %c) nounwind {
; CHECK-LABEL: atomic_shl1_mask1_xor_64_gpr_valz:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rsi, %rcx
; CHECK-NEXT: movl $1, %edx
; CHECK-NEXT: shlq %cl, %rdx
; CHECK-NEXT: movq (%rdi), %rax
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB13_1: # %atomicrmw.start
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: movq %rax, %rsi
; CHECK-NEXT: xorq %rdx, %rsi
; CHECK-NEXT: lock cmpxchgq %rsi, (%rdi)
; CHECK-NEXT: jne .LBB13_1
; CHECK-NEXT: # %bb.2: # %atomicrmw.end
; CHECK-NEXT: xorl %edx, %edx
; CHECK-NEXT: btq %rcx, %rax
; CHECK-NEXT: setae %dl
; CHECK-NEXT: movq %rdx, %rax
; CHECK-NEXT: retq
entry:
; Mirror of mask0_valz: xor'ed bit uses raw c, extraction uses (c & 63);
; cmpxchg loop followed by btq + setae.
%shl = shl nuw i64 1, %c
%0 = atomicrmw xor ptr %v, i64 %shl monotonic, align 8
%rem = and i64 %c, 63
%1 = xor i64 %0, -1
%2 = lshr i64 %1, %rem
%conv = and i64 %2, 1
ret i64 %conv
}
| |
define i64 @atomic_shl1_mask01_xor_64_gpr_valz(ptr %v, i64 %c) nounwind {
; CHECK-LABEL: atomic_shl1_mask01_xor_64_gpr_valz:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rsi, %rcx
; CHECK-NEXT: andl $63, %ecx
; CHECK-NEXT: movl $1, %edx
; CHECK-NEXT: shlq %cl, %rdx
; CHECK-NEXT: movq (%rdi), %rax
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB14_1: # %atomicrmw.start
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: movq %rax, %rsi
; CHECK-NEXT: xorq %rdx, %rsi
; CHECK-NEXT: lock cmpxchgq %rsi, (%rdi)
; CHECK-NEXT: jne .LBB14_1
; CHECK-NEXT: # %bb.2: # %atomicrmw.end
; CHECK-NEXT: xorl %edx, %edx
; CHECK-NEXT: btq %rcx, %rax
; CHECK-NEXT: setae %dl
; CHECK-NEXT: movq %rdx, %rax
; CHECK-NEXT: retq
entry:
; Both uses share the same masked amount (c & 63); "valz" form keeps the
; cmpxchg loop and tests the inverted bit with btq + setae.
%rem = and i64 %c, 63
%shl = shl nuw i64 1, %rem
%0 = atomicrmw xor ptr %v, i64 %shl monotonic, align 8
%1 = xor i64 %0, -1
%2 = lshr i64 %1, %rem
%conv = and i64 %2, 1
ret i64 %conv
}
| |
define i64 @atomic_blsi_xor_64_gpr_valz(ptr %v, i64 %c) nounwind {
; CHECK-LABEL: atomic_blsi_xor_64_gpr_valz:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rsi, %rdx
; CHECK-NEXT: negq %rdx
; CHECK-NEXT: andq %rsi, %rdx
; CHECK-NEXT: movq (%rdi), %rax
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB15_1: # %atomicrmw.start
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: movq %rax, %rcx
; CHECK-NEXT: xorq %rdx, %rcx
; CHECK-NEXT: lock cmpxchgq %rcx, (%rdi)
; CHECK-NEXT: jne .LBB15_1
; CHECK-NEXT: # %bb.2: # %atomicrmw.end
; CHECK-NEXT: xorl %ecx, %ecx
; CHECK-NEXT: testq %rdx, %rax
; CHECK-NEXT: sete %cl
; CHECK-NEXT: movq %rcx, %rax
; CHECK-NEXT: retq
entry:
; Lowest-set-bit mask (c & -c), zero check of the old value: not a "1 << c"
; pattern, so cmpxchg loop plus testq + sete.
%sub = sub i64 0, %c
%and = and i64 %sub, %c
%0 = atomicrmw xor ptr %v, i64 %and monotonic, align 8
%and3 = and i64 %0, %and
%tobool.not = icmp eq i64 %and3, 0
%conv = zext i1 %tobool.not to i64
ret i64 %conv
}
| |
define i64 @atomic_shl1_xor_64_gpr_valnz(ptr %v, i64 %c) nounwind {
; CHECK-LABEL: atomic_shl1_xor_64_gpr_valnz:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rsi, %rcx
; CHECK-NEXT: movl $1, %edx
; CHECK-NEXT: shlq %cl, %rdx
; CHECK-NEXT: movq (%rdi), %rax
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB16_1: # %atomicrmw.start
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: movq %rax, %rsi
; CHECK-NEXT: xorq %rdx, %rsi
; CHECK-NEXT: lock cmpxchgq %rsi, (%rdi)
; CHECK-NEXT: jne .LBB16_1
; CHECK-NEXT: # %bb.2: # %atomicrmw.end
; CHECK-NEXT: xorl %edx, %edx
; CHECK-NEXT: btq %rcx, %rax
; CHECK-NEXT: setb %dl
; CHECK-NEXT: movq %rdx, %rax
; CHECK-NEXT: retq
entry:
; "valnz" variant: returns bit c of the old value directly; extraction lowers
; to btq + setb after the cmpxchg loop.
%shl = shl nuw i64 1, %c
%0 = atomicrmw xor ptr %v, i64 %shl monotonic, align 8
%1 = lshr i64 %0, %c
%conv = and i64 %1, 1
ret i64 %conv
}
| |
define i64 @atomic_shl2_xor_64_gpr_valnz(ptr %v, i64 %c) nounwind {
; CHECK-LABEL: atomic_shl2_xor_64_gpr_valnz:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rsi, %rcx
; CHECK-NEXT: movl $2, %edx
; CHECK-NEXT: # kill: def $cl killed $cl killed $rcx
; CHECK-NEXT: shlq %cl, %rdx
; CHECK-NEXT: movq (%rdi), %rax
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB17_1: # %atomicrmw.start
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: movq %rax, %rcx
; CHECK-NEXT: xorq %rdx, %rcx
; CHECK-NEXT: lock cmpxchgq %rcx, (%rdi)
; CHECK-NEXT: jne .LBB17_1
; CHECK-NEXT: # %bb.2: # %atomicrmw.end
; CHECK-NEXT: xorl %ecx, %ecx
; CHECK-NEXT: testq %rdx, %rax
; CHECK-NEXT: setne %cl
; CHECK-NEXT: movq %rcx, %rax
; CHECK-NEXT: retq
entry:
; Negative test (2 << c): cmpxchg loop, nonzero check via testq + setne.
%shl = shl i64 2, %c
%0 = atomicrmw xor ptr %v, i64 %shl monotonic, align 8
%and = and i64 %0, %shl
%tobool = icmp ne i64 %and, 0
%conv = zext i1 %tobool to i64
ret i64 %conv
}
| |
define i64 @atomic_shl1_neq_xor_64_gpr_valnz(ptr %v, i64 %c) nounwind {
; CHECK-LABEL: atomic_shl1_neq_xor_64_gpr_valnz:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rsi, %rcx
; CHECK-NEXT: movl $1, %edx
; CHECK-NEXT: shlq %cl, %rdx
; CHECK-NEXT: movq (%rdi), %rax
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB18_1: # %atomicrmw.start
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: movq %rax, %rsi
; CHECK-NEXT: xorq %rdx, %rsi
; CHECK-NEXT: lock cmpxchgq %rsi, (%rdi)
; CHECK-NEXT: jne .LBB18_1
; CHECK-NEXT: # %bb.2: # %atomicrmw.end
; CHECK-NEXT: incb %cl
; CHECK-NEXT: movzbl %cl, %edx
; CHECK-NEXT: xorl %ecx, %ecx
; CHECK-NEXT: btq %rdx, %rax
; CHECK-NEXT: setb %cl
; CHECK-NEXT: movq %rcx, %rax
; CHECK-NEXT: retq
entry:
; Tested bit (c + 1) differs from the xor'ed bit (c): cmpxchg loop, then
; btq + setb on the incremented index.
%shl = shl nuw i64 1, %c
%0 = atomicrmw xor ptr %v, i64 %shl monotonic, align 8
%add = add i64 %c, 1
%1 = lshr i64 %0, %add
%conv = and i64 %1, 1
ret i64 %conv
}
| |
define i64 @atomic_shl1_small_mask_xor_64_gpr_valnz(ptr %v, i64 %c) nounwind {
; CHECK-LABEL: atomic_shl1_small_mask_xor_64_gpr_valnz:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rsi, %rcx
; CHECK-NEXT: andl $31, %ecx
; CHECK-NEXT: movl $1, %edx
; CHECK-NEXT: shlq %cl, %rdx
; CHECK-NEXT: movq (%rdi), %rax
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB19_1: # %atomicrmw.start
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: movq %rax, %rsi
; CHECK-NEXT: xorq %rdx, %rsi
; CHECK-NEXT: lock cmpxchgq %rsi, (%rdi)
; CHECK-NEXT: jne .LBB19_1
; CHECK-NEXT: # %bb.2: # %atomicrmw.end
; CHECK-NEXT: xorl %edx, %edx
; CHECK-NEXT: btl %ecx, %eax
; CHECK-NEXT: setb %dl
; CHECK-NEXT: movq %rdx, %rax
; CHECK-NEXT: retq
entry:
; Shift amount masked to [0, 31]; nonzero-bit extraction uses the 32-bit
; btl + setb form since the index fits in 5 bits.
%rem = and i64 %c, 31
%shl = shl nuw nsw i64 1, %rem
%0 = atomicrmw xor ptr %v, i64 %shl monotonic, align 8
%1 = lshr i64 %0, %rem
%conv = and i64 %1, 1
ret i64 %conv
}
| |
define i64 @atomic_shl1_mask0_xor_64_gpr_valnz(ptr %v, i64 %c) nounwind {
; CHECK-LABEL: atomic_shl1_mask0_xor_64_gpr_valnz:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rsi, %rcx
; CHECK-NEXT: movl $1, %edx
; CHECK-NEXT: shlq %cl, %rdx
; CHECK-NEXT: movq (%rdi), %rax
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB20_1: # %atomicrmw.start
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: movq %rax, %rsi
; CHECK-NEXT: xorq %rdx, %rsi
; CHECK-NEXT: lock cmpxchgq %rsi, (%rdi)
; CHECK-NEXT: jne .LBB20_1
; CHECK-NEXT: # %bb.2: # %atomicrmw.end
; CHECK-NEXT: xorl %edx, %edx
; CHECK-NEXT: btq %rcx, %rax
; CHECK-NEXT: setb %dl
; CHECK-NEXT: movq %rdx, %rax
; CHECK-NEXT: retq
entry:
; xor'ed bit uses (c & 63), extraction uses raw c (same bit mod 64);
; cmpxchg loop followed by btq + setb.
%rem = and i64 %c, 63
%shl = shl nuw i64 1, %rem
%0 = atomicrmw xor ptr %v, i64 %shl monotonic, align 8
%1 = lshr i64 %0, %c
%conv = and i64 %1, 1
ret i64 %conv
}
| |
define i64 @atomic_shl1_mask1_xor_64_gpr_valnz(ptr %v, i64 %c) nounwind {
; CHECK-LABEL: atomic_shl1_mask1_xor_64_gpr_valnz:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rsi, %rcx
; CHECK-NEXT: movl $1, %edx
; CHECK-NEXT: shlq %cl, %rdx
; CHECK-NEXT: movq (%rdi), %rax
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB21_1: # %atomicrmw.start
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: movq %rax, %rsi
; CHECK-NEXT: xorq %rdx, %rsi
; CHECK-NEXT: lock cmpxchgq %rsi, (%rdi)
; CHECK-NEXT: jne .LBB21_1
; CHECK-NEXT: # %bb.2: # %atomicrmw.end
; CHECK-NEXT: xorl %edx, %edx
; CHECK-NEXT: btq %rcx, %rax
; CHECK-NEXT: setb %dl
; CHECK-NEXT: movq %rdx, %rax
; CHECK-NEXT: retq
entry:
; Mirror of mask0_valnz: xor'ed bit uses raw c, extraction uses (c & 63);
; cmpxchg loop followed by btq + setb.
%shl = shl nuw i64 1, %c
%0 = atomicrmw xor ptr %v, i64 %shl monotonic, align 8
%rem = and i64 %c, 63
%1 = lshr i64 %0, %rem
%conv = and i64 %1, 1
ret i64 %conv
}
| |
define i64 @atomic_shl1_mask01_xor_64_gpr_valnz(ptr %v, i64 %c) nounwind {
; CHECK-LABEL: atomic_shl1_mask01_xor_64_gpr_valnz:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rsi, %rcx
; CHECK-NEXT: andl $63, %ecx
; CHECK-NEXT: movl $1, %edx
; CHECK-NEXT: shlq %cl, %rdx
; CHECK-NEXT: movq (%rdi), %rax
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB22_1: # %atomicrmw.start
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: movq %rax, %rsi
; CHECK-NEXT: xorq %rdx, %rsi
; CHECK-NEXT: lock cmpxchgq %rsi, (%rdi)
; CHECK-NEXT: jne .LBB22_1
; CHECK-NEXT: # %bb.2: # %atomicrmw.end
; CHECK-NEXT: xorl %edx, %edx
; CHECK-NEXT: btq %rcx, %rax
; CHECK-NEXT: setb %dl
; CHECK-NEXT: movq %rdx, %rax
; CHECK-NEXT: retq
entry:
; Both uses share the same masked amount (c & 63); "valnz" form keeps the
; cmpxchg loop and reads the bit with btq + setb.
%rem = and i64 %c, 63
%shl = shl nuw i64 1, %rem
%0 = atomicrmw xor ptr %v, i64 %shl monotonic, align 8
%1 = lshr i64 %0, %rem
%conv = and i64 %1, 1
ret i64 %conv
}
| |
define i64 @atomic_blsi_xor_64_gpr_valnz(ptr %v, i64 %c) nounwind {
; CHECK-LABEL: atomic_blsi_xor_64_gpr_valnz:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rsi, %rdx
; CHECK-NEXT: negq %rdx
; CHECK-NEXT: andq %rsi, %rdx
; CHECK-NEXT: movq (%rdi), %rax
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB23_1: # %atomicrmw.start
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: movq %rax, %rcx
; CHECK-NEXT: xorq %rdx, %rcx
; CHECK-NEXT: lock cmpxchgq %rcx, (%rdi)
; CHECK-NEXT: jne .LBB23_1
; CHECK-NEXT: # %bb.2: # %atomicrmw.end
; CHECK-NEXT: xorl %ecx, %ecx
; CHECK-NEXT: testq %rdx, %rax
; CHECK-NEXT: setne %cl
; CHECK-NEXT: movq %rcx, %rax
; CHECK-NEXT: retq
entry:
; Lowest-set-bit mask (c & -c), nonzero check of the old value: not a
; "1 << c" pattern, so cmpxchg loop plus testq + setne.
%sub = sub i64 0, %c
%and = and i64 %sub, %c
%0 = atomicrmw xor ptr %v, i64 %and monotonic, align 8
%and3 = and i64 %0, %and
%tobool = icmp ne i64 %and3, 0
%conv = zext i1 %tobool to i64
ret i64 %conv
}
| |
define i64 @atomic_shl1_and_64_gpr_val(ptr %v, i64 %c) nounwind {
; CHECK-LABEL: atomic_shl1_and_64_gpr_val:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rsi, %rcx
; CHECK-NEXT: andl $63, %ecx
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: lock btrq %rcx, (%rdi)
; CHECK-NEXT: setb %al
; CHECK-NEXT: # kill: def $cl killed $cl killed $rcx
; CHECK-NEXT: shlq %cl, %rax
; CHECK-NEXT: retq
entry:
; atomicrmw and with ~(1 << c) clears one bit, so it folds to "lock btrq"
; (bit-test-and-reset); the not here is spelled "sub -1, %shl".
%shl = shl nuw i64 1, %c
%not = sub i64 -1, %shl
%0 = atomicrmw and ptr %v, i64 %not monotonic, align 8
%and = and i64 %0, %shl
ret i64 %and
}
| |
define i64 @atomic_shl2_and_64_gpr_val(ptr %v, i64 %c) nounwind {
; CHECK-LABEL: atomic_shl2_and_64_gpr_val:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rsi, %rcx
; CHECK-NEXT: movl $2, %edx
; CHECK-NEXT: # kill: def $cl killed $cl killed $rcx
; CHECK-NEXT: shlq %cl, %rdx
; CHECK-NEXT: movq %rdx, %rcx
; CHECK-NEXT: notq %rcx
; CHECK-NEXT: movq (%rdi), %rax
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB25_1: # %atomicrmw.start
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: movq %rax, %rsi
; CHECK-NEXT: andq %rcx, %rsi
; CHECK-NEXT: lock cmpxchgq %rsi, (%rdi)
; CHECK-NEXT: jne .LBB25_1
; CHECK-NEXT: # %bb.2: # %atomicrmw.end
; CHECK-NEXT: andq %rdx, %rax
; CHECK-NEXT: retq
entry:
; Negative test: ~(2 << c) is not a single-bit-clear pattern, so the mask is
; materialized with notq and the atomicrmw stays a cmpxchg loop.
%shl = shl i64 2, %c
%not = xor i64 %shl, -1
%0 = atomicrmw and ptr %v, i64 %not monotonic, align 8
%and = and i64 %0, %shl
ret i64 %and
}
| |
define i64 @atomic_shl1_neq_and_64_gpr_val(ptr %v, i64 %c) nounwind {
; CHECK-LABEL: atomic_shl1_neq_and_64_gpr_val:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rsi, %rcx
; CHECK-NEXT: movq $-2, %rdx
; CHECK-NEXT: rolq %cl, %rdx
; CHECK-NEXT: movq (%rdi), %rax
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB26_1: # %atomicrmw.start
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: movq %rax, %rsi
; CHECK-NEXT: andq %rdx, %rsi
; CHECK-NEXT: lock cmpxchgq %rsi, (%rdi)
; CHECK-NEXT: jne .LBB26_1
; CHECK-NEXT: # %bb.2: # %atomicrmw.end
; CHECK-NEXT: incb %cl
; CHECK-NEXT: movl $1, %edx
; CHECK-NEXT: # kill: def $cl killed $cl killed $rcx
; CHECK-NEXT: shlq %cl, %rdx
; CHECK-NEXT: andq %rdx, %rax
; CHECK-NEXT: retq
entry:
; Bit cleared (c) differs from bit extracted (c + 1), so no btr fold; note
; ~(1 << c) is materialized as "rolq %cl" of the constant -2.
%shl = shl nuw i64 1, %c
%not = xor i64 %shl, -1
%0 = atomicrmw and ptr %v, i64 %not monotonic, align 8
%add = add i64 %c, 1
%shl1 = shl nuw i64 1, %add
%and = and i64 %0, %shl1
ret i64 %and
}
| |
define i64 @atomic_shl1_small_mask_and_64_gpr_val(ptr %v, i64 %c) nounwind {
; CHECK-LABEL: atomic_shl1_small_mask_and_64_gpr_val:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rsi, %rcx
; CHECK-NEXT: andl $31, %ecx
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: lock btrq %rcx, (%rdi)
; CHECK-NEXT: setb %al
; CHECK-NEXT: # kill: def $cl killed $cl killed $rcx
; CHECK-NEXT: shlq %cl, %rax
; CHECK-NEXT: retq
entry:
; Shift amount pre-masked to [0, 31]; single-bit clear still recognized and
; lowered to "lock btrq".
%rem = and i64 %c, 31
%shl = shl nuw nsw i64 1, %rem
%not = xor i64 %shl, -1
%0 = atomicrmw and ptr %v, i64 %not monotonic, align 8
%and = and i64 %0, %shl
ret i64 %and
}
| |
define i64 @atomic_shl1_mask0_and_64_gpr_val(ptr %v, i64 %c) nounwind {
; CHECK-LABEL: atomic_shl1_mask0_and_64_gpr_val:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rsi, %rcx
; CHECK-NEXT: andl $63, %ecx
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: lock btrq %rcx, (%rdi)
; CHECK-NEXT: setb %al
; CHECK-NEXT: # kill: def $cl killed $cl killed $rcx
; CHECK-NEXT: shlq %cl, %rax
; CHECK-NEXT: retq
entry:
; Cleared bit uses (c & 63), extracted bit uses raw c — same bit mod 64, so
; the btr fold applies; the not is spelled "sub -1, %shl".
%rem = and i64 %c, 63
%shl = shl nuw i64 1, %rem
%not = sub i64 -1, %shl
%0 = atomicrmw and ptr %v, i64 %not monotonic, align 8
%shl1 = shl nuw i64 1, %c
%and = and i64 %0, %shl1
ret i64 %and
}
| |
define i64 @atomic_shl1_mask1_and_64_gpr_val(ptr %v, i64 %c) nounwind {
; CHECK-LABEL: atomic_shl1_mask1_and_64_gpr_val:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rsi, %rcx
; CHECK-NEXT: andl $63, %ecx
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: lock btrq %rcx, (%rdi)
; CHECK-NEXT: setb %al
; CHECK-NEXT: # kill: def $cl killed $cl killed $rcx
; CHECK-NEXT: shlq %cl, %rax
; CHECK-NEXT: retq
entry:
; Mirror of mask0: cleared bit uses raw c, extracted bit uses (c & 63);
; equivalent mod 64, so "lock btrq" is still generated.
%shl = shl nuw i64 1, %c
%not = xor i64 %shl, -1
%0 = atomicrmw and ptr %v, i64 %not monotonic, align 8
%rem = and i64 %c, 63
%shl1 = shl nuw i64 1, %rem
%and = and i64 %0, %shl1
ret i64 %and
}
| |
define i64 @atomic_shl1_mask01_and_64_gpr_val(ptr %v, i64 %c) nounwind {
; CHECK-LABEL: atomic_shl1_mask01_and_64_gpr_val:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rsi, %rcx
; CHECK-NEXT: andl $63, %ecx
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: lock btrq %rcx, (%rdi)
; CHECK-NEXT: setb %al
; CHECK-NEXT: # kill: def $cl killed $cl killed $rcx
; CHECK-NEXT: shlq %cl, %rax
; CHECK-NEXT: retq
entry:
; Both the cleared and extracted bits use the same masked amount (c & 63),
; so the single-bit-clear fold to "lock btrq" applies.
%rem = and i64 %c, 63
%shl = shl nuw i64 1, %rem
%not = xor i64 %shl, -1
%0 = atomicrmw and ptr %v, i64 %not monotonic, align 8
%and = and i64 %0, %shl
ret i64 %and
}
| |
define i64 @atomic_blsi_and_64_gpr_val(ptr %v, i64 %c) nounwind {
; CHECK-LABEL: atomic_blsi_and_64_gpr_val:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rsi, %rcx
; CHECK-NEXT: negq %rcx
; CHECK-NEXT: andq %rsi, %rcx
; CHECK-NEXT: movq %rcx, %rdx
; CHECK-NEXT: notq %rdx
; CHECK-NEXT: movq (%rdi), %rax
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB31_1: # %atomicrmw.start
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: movq %rax, %rsi
; CHECK-NEXT: andq %rdx, %rsi
; CHECK-NEXT: lock cmpxchgq %rsi, (%rdi)
; CHECK-NEXT: jne .LBB31_1
; CHECK-NEXT: # %bb.2: # %atomicrmw.end
; CHECK-NEXT: andq %rax, %rcx
; CHECK-NEXT: movq %rcx, %rax
; CHECK-NEXT: retq
entry:
; Clearing the lowest set bit of c (mask c & -c, inverted): not a "1 << c"
; pattern, so notq + cmpxchg loop instead of btrq.
%sub = sub i64 0, %c
%and = and i64 %sub, %c
%not = xor i64 %and, -1
%0 = atomicrmw and ptr %v, i64 %not monotonic, align 8
%and3 = and i64 %and, %0
ret i64 %and3
}
| |
define i64 @atomic_shl1_and_64_gpr_valnz(ptr %v, i64 %c) nounwind {
; CHECK-LABEL: atomic_shl1_and_64_gpr_valnz:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rsi, %rcx
; CHECK-NEXT: movq $-2, %rdx
; CHECK-NEXT: rolq %cl, %rdx
; CHECK-NEXT: movq (%rdi), %rax
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB32_1: # %atomicrmw.start
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: movq %rax, %rsi
; CHECK-NEXT: andq %rdx, %rsi
; CHECK-NEXT: lock cmpxchgq %rsi, (%rdi)
; CHECK-NEXT: jne .LBB32_1
; CHECK-NEXT: # %bb.2: # %atomicrmw.end
; CHECK-NEXT: xorl %edx, %edx
; CHECK-NEXT: btq %rcx, %rax
; CHECK-NEXT: setb %dl
; CHECK-NEXT: movq %rdx, %rax
; CHECK-NEXT: retq
entry:
; "valnz" and-form: ~(1 << c) built via "rolq" of -2; this shape currently
; keeps the cmpxchg loop and reads the old bit with btq + setb.
%shl = shl nuw i64 1, %c
%not = xor i64 %shl, -1
%0 = atomicrmw and ptr %v, i64 %not monotonic, align 8
%1 = lshr i64 %0, %c
%conv = and i64 %1, 1
ret i64 %conv
}
| |
define i64 @atomic_shl2_and_64_gpr_valnz(ptr %v, i64 %c) nounwind {
; CHECK-LABEL: atomic_shl2_and_64_gpr_valnz:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rsi, %rcx
; CHECK-NEXT: movl $2, %edx
; CHECK-NEXT: # kill: def $cl killed $cl killed $rcx
; CHECK-NEXT: shlq %cl, %rdx
; CHECK-NEXT: movq %rdx, %rcx
; CHECK-NEXT: notq %rcx
; CHECK-NEXT: movq (%rdi), %rax
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB33_1: # %atomicrmw.start
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: movq %rax, %rsi
; CHECK-NEXT: andq %rcx, %rsi
; CHECK-NEXT: lock cmpxchgq %rsi, (%rdi)
; CHECK-NEXT: jne .LBB33_1
; CHECK-NEXT: # %bb.2: # %atomicrmw.end
; CHECK-NEXT: xorl %ecx, %ecx
; CHECK-NEXT: testq %rax, %rdx
; CHECK-NEXT: setne %cl
; CHECK-NEXT: movq %rcx, %rax
; CHECK-NEXT: retq
entry:
; Negative test (~(2 << c) is not single-bit-clear): notq + cmpxchg loop,
; then testq + setne for the nonzero check.
%shl = shl i64 2, %c
%not = xor i64 %shl, -1
%0 = atomicrmw and ptr %v, i64 %not monotonic, align 8
%and = and i64 %shl, %0
%tobool = icmp ne i64 %and, 0
%conv = zext i1 %tobool to i64
ret i64 %conv
}
| |
define i64 @atomic_shl1_neq_and_64_gpr_valnz(ptr %v, i64 %c) nounwind {
; CHECK-LABEL: atomic_shl1_neq_and_64_gpr_valnz:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rsi, %rcx
; CHECK-NEXT: movq $-2, %rdx
; CHECK-NEXT: rolq %cl, %rdx
; CHECK-NEXT: movq (%rdi), %rax
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB34_1: # %atomicrmw.start
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: movq %rax, %rsi
; CHECK-NEXT: andq %rdx, %rsi
; CHECK-NEXT: lock cmpxchgq %rsi, (%rdi)
; CHECK-NEXT: jne .LBB34_1
; CHECK-NEXT: # %bb.2: # %atomicrmw.end
; CHECK-NEXT: incb %cl
; CHECK-NEXT: movzbl %cl, %edx
; CHECK-NEXT: xorl %ecx, %ecx
; CHECK-NEXT: btq %rdx, %rax
; CHECK-NEXT: setb %cl
; CHECK-NEXT: movq %rcx, %rax
; CHECK-NEXT: retq
entry:
; Bit cleared (c) differs from bit extracted (c + 1): cmpxchg loop with the
; "rolq -2" inverted mask, then btq + setb on the incremented index.
%shl = shl nuw i64 1, %c
%not = xor i64 %shl, -1
%0 = atomicrmw and ptr %v, i64 %not monotonic, align 8
%add = add i64 %c, 1
%1 = lshr i64 %0, %add
%conv = and i64 %1, 1
ret i64 %conv
}
| |
define i64 @atomic_shl1_small_mask_and_64_gpr_valnz(ptr %v, i64 %c) nounwind {
; CHECK-LABEL: atomic_shl1_small_mask_and_64_gpr_valnz:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq %rsi, %rcx
; CHECK-NEXT: andl $31, %ecx
; CHECK-NEXT: movq $-2, %rdx
; CHECK-NEXT: rolq %cl, %rdx
; CHECK-NEXT: movq (%rdi), %rax
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB35_1: # %atomicrmw.start
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: movq %rax, %rsi
; CHECK-NEXT: andq %rdx, %rsi
; CHECK-NEXT: lock cmpxchgq %rsi, (%rdi)
; CHECK-NEXT: jne .LBB35_1
; CHECK-NEXT: # %bb.2: # %atomicrmw.end
; CHECK-NEXT: xorl %edx, %edx
; CHECK-NEXT: btl %ecx, %eax
; CHECK-NEXT: setb %dl
; CHECK-NEXT: movq %rdx, %rax
; CHECK-NEXT: retq
entry:
; Shift amount masked to [0, 31]; cmpxchg loop with "rolq -2" mask, then the
; 32-bit btl + setb reads the old bit.
%rem = and i64 %c, 31
%shl = shl nuw nsw i64 1, %rem
%not = xor i64 %shl, -1
%0 = atomicrmw and ptr %v, i64 %not monotonic, align 8
%1 = lshr i64 %0, %rem
%conv = and i64 %1, 1
ret i64 %conv
}
| |
| ; "mask0" variant: only the shl amount feeding the atomic mask is masked with |
| ; 63; the lshr of the old value uses the raw %c. The `and 63` is redundant for |
| ; a 64-bit shift, so codegen uses %rcx unmasked for the final btq. |
| define i64 @atomic_shl1_mask0_and_64_gpr_valnz(ptr %v, i64 %c) nounwind { |
| ; CHECK-LABEL: atomic_shl1_mask0_and_64_gpr_valnz: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: movq %rsi, %rcx |
| ; CHECK-NEXT: movq $-2, %rdx |
| ; CHECK-NEXT: rolq %cl, %rdx |
| ; CHECK-NEXT: movq (%rdi), %rax |
| ; CHECK-NEXT: .p2align 4, 0x90 |
| ; CHECK-NEXT: .LBB36_1: # %atomicrmw.start |
| ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 |
| ; CHECK-NEXT: movq %rax, %rsi |
| ; CHECK-NEXT: andq %rdx, %rsi |
| ; CHECK-NEXT: lock cmpxchgq %rsi, (%rdi) |
| ; CHECK-NEXT: jne .LBB36_1 |
| ; CHECK-NEXT: # %bb.2: # %atomicrmw.end |
| ; CHECK-NEXT: xorl %edx, %edx |
| ; CHECK-NEXT: btq %rcx, %rax |
| ; CHECK-NEXT: setb %dl |
| ; CHECK-NEXT: movq %rdx, %rax |
| ; CHECK-NEXT: retq |
| entry: |
|   %rem = and i64 %c, 63 |
|   %shl = shl nuw i64 1, %rem |
|   %not = xor i64 %shl, -1 |
|   %0 = atomicrmw and ptr %v, i64 %not monotonic, align 8 |
|   %1 = lshr i64 %0, %c |
|   %conv = and i64 %1, 1 |
|   ret i64 %conv |
| } |
| |
| ; "mask1" variant: the shl amount is unmasked but the lshr amount is masked |
| ; with 63. Symmetric to mask0 — the mask is redundant, and codegen matches it: |
| ; cmpxchg loop with rolq mask, then btq %rcx on the old value. |
| define i64 @atomic_shl1_mask1_and_64_gpr_valnz(ptr %v, i64 %c) nounwind { |
| ; CHECK-LABEL: atomic_shl1_mask1_and_64_gpr_valnz: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: movq %rsi, %rcx |
| ; CHECK-NEXT: movq $-2, %rdx |
| ; CHECK-NEXT: rolq %cl, %rdx |
| ; CHECK-NEXT: movq (%rdi), %rax |
| ; CHECK-NEXT: .p2align 4, 0x90 |
| ; CHECK-NEXT: .LBB37_1: # %atomicrmw.start |
| ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 |
| ; CHECK-NEXT: movq %rax, %rsi |
| ; CHECK-NEXT: andq %rdx, %rsi |
| ; CHECK-NEXT: lock cmpxchgq %rsi, (%rdi) |
| ; CHECK-NEXT: jne .LBB37_1 |
| ; CHECK-NEXT: # %bb.2: # %atomicrmw.end |
| ; CHECK-NEXT: xorl %edx, %edx |
| ; CHECK-NEXT: btq %rcx, %rax |
| ; CHECK-NEXT: setb %dl |
| ; CHECK-NEXT: movq %rdx, %rax |
| ; CHECK-NEXT: retq |
| entry: |
|   %shl = shl nuw i64 1, %c |
|   %not = xor i64 %shl, -1 |
|   %0 = atomicrmw and ptr %v, i64 %not monotonic, align 8 |
|   %rem = and i64 %c, 63 |
|   %1 = lshr i64 %0, %rem |
|   %conv = and i64 %1, 1 |
|   ret i64 %conv |
| } |
| |
| ; "mask01" variant: the same masked amount (%c & 63) feeds both the shl and |
| ; the lshr. Codegen keeps the masked index live in %rdx across the cmpxchg |
| ; loop for the final btq (note the explicit andl $63 and the $cl kill comment). |
| define i64 @atomic_shl1_mask01_and_64_gpr_valnz(ptr %v, i64 %c) nounwind { |
| ; CHECK-LABEL: atomic_shl1_mask01_and_64_gpr_valnz: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: movq %rsi, %rcx |
| ; CHECK-NEXT: movl %ecx, %edx |
| ; CHECK-NEXT: andl $63, %edx |
| ; CHECK-NEXT: movq $-2, %rsi |
| ; CHECK-NEXT: # kill: def $cl killed $cl killed $rcx |
| ; CHECK-NEXT: rolq %cl, %rsi |
| ; CHECK-NEXT: movq (%rdi), %rax |
| ; CHECK-NEXT: .p2align 4, 0x90 |
| ; CHECK-NEXT: .LBB38_1: # %atomicrmw.start |
| ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 |
| ; CHECK-NEXT: movq %rax, %rcx |
| ; CHECK-NEXT: andq %rsi, %rcx |
| ; CHECK-NEXT: lock cmpxchgq %rcx, (%rdi) |
| ; CHECK-NEXT: jne .LBB38_1 |
| ; CHECK-NEXT: # %bb.2: # %atomicrmw.end |
| ; CHECK-NEXT: xorl %ecx, %ecx |
| ; CHECK-NEXT: btq %rdx, %rax |
| ; CHECK-NEXT: setb %cl |
| ; CHECK-NEXT: movq %rcx, %rax |
| ; CHECK-NEXT: retq |
| entry: |
|   %rem = and i64 %c, 63 |
|   %shl = shl nuw i64 1, %rem |
|   %not = xor i64 %shl, -1 |
|   %0 = atomicrmw and ptr %v, i64 %not monotonic, align 8 |
|   %1 = lshr i64 %0, %rem |
|   %conv = and i64 %1, 1 |
|   ret i64 %conv |
| } |
| |
| ; The mask is the lowest set bit of %c (the BLSI pattern, c & -c) rather than |
| ; 1 << c, so no bit index is available for a bt-style lowering; expected |
| ; codegen is neg/and to build the mask, a cmpxchg loop clearing it, then |
| ; testq/setne on the old value. |
| define i64 @atomic_blsi_and_64_gpr_valnz(ptr %v, i64 %c) nounwind { |
| ; CHECK-LABEL: atomic_blsi_and_64_gpr_valnz: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: movq %rsi, %rdx |
| ; CHECK-NEXT: negq %rdx |
| ; CHECK-NEXT: andq %rsi, %rdx |
| ; CHECK-NEXT: movq %rdx, %rcx |
| ; CHECK-NEXT: notq %rcx |
| ; CHECK-NEXT: movq (%rdi), %rax |
| ; CHECK-NEXT: .p2align 4, 0x90 |
| ; CHECK-NEXT: .LBB39_1: # %atomicrmw.start |
| ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 |
| ; CHECK-NEXT: movq %rax, %rsi |
| ; CHECK-NEXT: andq %rcx, %rsi |
| ; CHECK-NEXT: lock cmpxchgq %rsi, (%rdi) |
| ; CHECK-NEXT: jne .LBB39_1 |
| ; CHECK-NEXT: # %bb.2: # %atomicrmw.end |
| ; CHECK-NEXT: xorl %ecx, %ecx |
| ; CHECK-NEXT: testq %rdx, %rax |
| ; CHECK-NEXT: setne %cl |
| ; CHECK-NEXT: movq %rcx, %rax |
| ; CHECK-NEXT: retq |
| entry: |
|   %sub = sub i64 0, %c |
|   %and = and i64 %sub, %c |
|   %not = xor i64 %and, -1 |
|   %0 = atomicrmw and ptr %v, i64 %not monotonic, align 8 |
|   %and3 = and i64 %0, %and |
|   %tobool = icmp ne i64 %and3, 0 |
|   %conv = zext i1 %tobool to i64 |
|   ret i64 %conv |
| } |
| |
| ; Bit-test-and-reset followed only by a branch on the tested bit: the whole |
| ; atomicrmw-and + test folds into a single `lock btrq` with a jae on CF — no |
| ; cmpxchg loop. The branch-taken path loads v[c]; otherwise returns 123. |
| define i64 @atomic_shl1_and_64_gpr_brnz(ptr %v, i64 %c) nounwind { |
| ; CHECK-LABEL: atomic_shl1_and_64_gpr_brnz: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: movl %esi, %eax |
| ; CHECK-NEXT: andl $63, %eax |
| ; CHECK-NEXT: lock btrq %rax, (%rdi) |
| ; CHECK-NEXT: jae .LBB40_1 |
| ; CHECK-NEXT: # %bb.2: # %if.then |
| ; CHECK-NEXT: movq (%rdi,%rsi,8), %rax |
| ; CHECK-NEXT: retq |
| ; CHECK-NEXT: .LBB40_1: |
| ; CHECK-NEXT: movl $123, %eax |
| ; CHECK-NEXT: retq |
| entry: |
|   %shl = shl nuw i64 1, %c |
|   %not = xor i64 %shl, -1 |
|   %0 = atomicrmw and ptr %v, i64 %not monotonic, align 8 |
|   %and = and i64 %shl, %0 |
|   %tobool.not = icmp eq i64 %and, 0 |
|   br i1 %tobool.not, label %return, label %if.then |
| |
| if.then: ; preds = %entry |
|   %arrayidx = getelementptr inbounds i64, ptr %v, i64 %c |
|   %1 = load i64, ptr %arrayidx, align 8 |
|   br label %return |
| |
| return: ; preds = %entry, %if.then |
|   %retval.0 = phi i64 [ %1, %if.then ], [ 123, %entry ] |
|   ret i64 %retval.0 |
| } |
| |
| ; Same shape as atomic_shl1_and_64_gpr_brnz but the mask is 2 << %c, not |
| ; 1 << %c, so it is not a single-bit-at-index pattern and cannot become |
| ; `lock btr`; expected codegen is shlq to build the mask, notq, a cmpxchg |
| ; loop, then testq/je on the old value. |
| define i64 @atomic_shl2_and_64_gpr_brnz(ptr %v, i64 %c) nounwind { |
| ; CHECK-LABEL: atomic_shl2_and_64_gpr_brnz: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: movq %rsi, %rcx |
| ; CHECK-NEXT: movl $2, %edx |
| ; CHECK-NEXT: shlq %cl, %rdx |
| ; CHECK-NEXT: movq %rdx, %rsi |
| ; CHECK-NEXT: notq %rsi |
| ; CHECK-NEXT: movq (%rdi), %rax |
| ; CHECK-NEXT: .p2align 4, 0x90 |
| ; CHECK-NEXT: .LBB41_1: # %atomicrmw.start |
| ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 |
| ; CHECK-NEXT: movq %rax, %r8 |
| ; CHECK-NEXT: andq %rsi, %r8 |
| ; CHECK-NEXT: lock cmpxchgq %r8, (%rdi) |
| ; CHECK-NEXT: jne .LBB41_1 |
| ; CHECK-NEXT: # %bb.2: # %atomicrmw.end |
| ; CHECK-NEXT: testq %rdx, %rax |
| ; CHECK-NEXT: je .LBB41_3 |
| ; CHECK-NEXT: # %bb.4: # %if.then |
| ; CHECK-NEXT: movq (%rdi,%rcx,8), %rax |
| ; CHECK-NEXT: retq |
| ; CHECK-NEXT: .LBB41_3: |
| ; CHECK-NEXT: movl $123, %eax |
| ; CHECK-NEXT: retq |
| entry: |
|   %shl = shl i64 2, %c |
|   %not = xor i64 %shl, -1 |
|   %0 = atomicrmw and ptr %v, i64 %not monotonic, align 8 |
|   %and = and i64 %0, %shl |
|   %tobool.not = icmp eq i64 %and, 0 |
|   br i1 %tobool.not, label %return, label %if.then |
| |
| if.then: ; preds = %entry |
|   %arrayidx = getelementptr inbounds i64, ptr %v, i64 %c |
|   %1 = load i64, ptr %arrayidx, align 8 |
|   br label %return |
| |
| return: ; preds = %entry, %if.then |
|   %retval.0 = phi i64 [ %1, %if.then ], [ 123, %entry ] |
|   ret i64 %retval.0 |
| } |
| |
| ; Clears bit %c but branches on bit (%c + 1) of the old value. The index |
| ; mismatch blocks the `lock btr` fold; expected codegen is a cmpxchg loop, |
| ; then leal/movzbl to form the (byte-truncated) c+1 index and btq + jae. |
| define i64 @atomic_shl1_neq_and_64_gpr_brnz(ptr %v, i64 %c) nounwind { |
| ; CHECK-LABEL: atomic_shl1_neq_and_64_gpr_brnz: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: movq %rsi, %rcx |
| ; CHECK-NEXT: movq $-2, %rdx |
| ; CHECK-NEXT: rolq %cl, %rdx |
| ; CHECK-NEXT: movq (%rdi), %rax |
| ; CHECK-NEXT: .p2align 4, 0x90 |
| ; CHECK-NEXT: .LBB42_1: # %atomicrmw.start |
| ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 |
| ; CHECK-NEXT: movq %rax, %rsi |
| ; CHECK-NEXT: andq %rdx, %rsi |
| ; CHECK-NEXT: lock cmpxchgq %rsi, (%rdi) |
| ; CHECK-NEXT: jne .LBB42_1 |
| ; CHECK-NEXT: # %bb.2: # %atomicrmw.end |
| ; CHECK-NEXT: leal 1(%rcx), %edx |
| ; CHECK-NEXT: movzbl %dl, %edx |
| ; CHECK-NEXT: btq %rdx, %rax |
| ; CHECK-NEXT: jae .LBB42_3 |
| ; CHECK-NEXT: # %bb.4: # %if.then |
| ; CHECK-NEXT: movq (%rdi,%rcx,8), %rax |
| ; CHECK-NEXT: retq |
| ; CHECK-NEXT: .LBB42_3: |
| ; CHECK-NEXT: movl $123, %eax |
| ; CHECK-NEXT: retq |
| entry: |
|   %shl = shl nuw i64 1, %c |
|   %not = xor i64 %shl, -1 |
|   %0 = atomicrmw and ptr %v, i64 %not monotonic, align 8 |
|   %add = add i64 %c, 1 |
|   %shl1 = shl nuw i64 1, %add |
|   %and = and i64 %0, %shl1 |
|   %tobool.not = icmp eq i64 %and, 0 |
|   br i1 %tobool.not, label %return, label %if.then |
| |
| if.then: ; preds = %entry |
|   %arrayidx = getelementptr inbounds i64, ptr %v, i64 %c |
|   %1 = load i64, ptr %arrayidx, align 8 |
|   br label %return |
| |
| return: ; preds = %entry, %if.then |
|   %retval.0 = phi i64 [ %1, %if.then ], [ 123, %entry ] |
|   ret i64 %retval.0 |
| } |
| |
| ; Branch-only use with the index pre-masked to 31: still folds to a single |
| ; `lock btrq` (index < 64 after masking), and the masked index %rsi is reused |
| ; for the array load on the taken path. |
| define i64 @atomic_shl1_small_mask_and_64_gpr_brnz(ptr %v, i64 %c) nounwind { |
| ; CHECK-LABEL: atomic_shl1_small_mask_and_64_gpr_brnz: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: andl $31, %esi |
| ; CHECK-NEXT: lock btrq %rsi, (%rdi) |
| ; CHECK-NEXT: jae .LBB43_1 |
| ; CHECK-NEXT: # %bb.2: # %if.then |
| ; CHECK-NEXT: movq (%rdi,%rsi,8), %rax |
| ; CHECK-NEXT: retq |
| ; CHECK-NEXT: .LBB43_1: |
| ; CHECK-NEXT: movl $123, %eax |
| ; CHECK-NEXT: retq |
| entry: |
|   %rem = and i64 %c, 31 |
|   %shl = shl nuw nsw i64 1, %rem |
|   %not = xor i64 %shl, -1 |
|   %0 = atomicrmw and ptr %v, i64 %not monotonic, align 8 |
|   %and = and i64 %0, %shl |
|   %tobool.not = icmp eq i64 %and, 0 |
|   br i1 %tobool.not, label %return, label %if.then |
| |
| if.then: ; preds = %entry |
|   %arrayidx = getelementptr inbounds i64, ptr %v, i64 %rem |
|   %1 = load i64, ptr %arrayidx, align 8 |
|   br label %return |
| |
| return: ; preds = %entry, %if.then |
|   %retval.0 = phi i64 [ %1, %if.then ], [ 123, %entry ] |
|   ret i64 %retval.0 |
| } |
| |
| ; "mask0" branch variant: the atomic mask uses %c & 63 while the tested bit |
| ; uses the raw %c. Both shifts agree modulo 64, so the combine still produces |
| ; a single `lock btrq` on the masked index; the array load uses raw %rsi. |
| define i64 @atomic_shl1_mask0_and_64_gpr_brnz(ptr %v, i64 %c) nounwind { |
| ; CHECK-LABEL: atomic_shl1_mask0_and_64_gpr_brnz: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: movl %esi, %eax |
| ; CHECK-NEXT: andl $63, %eax |
| ; CHECK-NEXT: lock btrq %rax, (%rdi) |
| ; CHECK-NEXT: jae .LBB44_1 |
| ; CHECK-NEXT: # %bb.2: # %if.then |
| ; CHECK-NEXT: movq (%rdi,%rsi,8), %rax |
| ; CHECK-NEXT: retq |
| ; CHECK-NEXT: .LBB44_1: |
| ; CHECK-NEXT: movl $123, %eax |
| ; CHECK-NEXT: retq |
| entry: |
|   %rem = and i64 %c, 63 |
|   %shl = shl nuw i64 1, %rem |
|   %not = xor i64 %shl, -1 |
|   %0 = atomicrmw and ptr %v, i64 %not monotonic, align 8 |
|   %shl1 = shl nuw i64 1, %c |
|   %and = and i64 %shl1, %0 |
|   %tobool.not = icmp eq i64 %and, 0 |
|   br i1 %tobool.not, label %return, label %if.then |
| |
| if.then: ; preds = %entry |
|   %arrayidx = getelementptr inbounds i64, ptr %v, i64 %c |
|   %1 = load i64, ptr %arrayidx, align 8 |
|   br label %return |
| |
| return: ; preds = %entry, %if.then |
|   %retval.0 = phi i64 [ %1, %if.then ], [ 123, %entry ] |
|   ret i64 %retval.0 |
| } |
| |
| ; "mask1" branch variant: unmasked shl for the atomic mask, masked (%c & 63) |
| ; shl for the tested bit. Equivalent modulo 64, so codegen is again a single |
| ; `lock btrq` plus a CF branch — identical pattern to the mask0 variant. |
| define i64 @atomic_shl1_mask1_and_64_gpr_brnz(ptr %v, i64 %c) nounwind { |
| ; CHECK-LABEL: atomic_shl1_mask1_and_64_gpr_brnz: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: movl %esi, %eax |
| ; CHECK-NEXT: andl $63, %eax |
| ; CHECK-NEXT: lock btrq %rax, (%rdi) |
| ; CHECK-NEXT: jae .LBB45_1 |
| ; CHECK-NEXT: # %bb.2: # %if.then |
| ; CHECK-NEXT: movq (%rdi,%rsi,8), %rax |
| ; CHECK-NEXT: retq |
| ; CHECK-NEXT: .LBB45_1: |
| ; CHECK-NEXT: movl $123, %eax |
| ; CHECK-NEXT: retq |
| entry: |
|   %shl = shl nuw i64 1, %c |
|   %not = xor i64 %shl, -1 |
|   %0 = atomicrmw and ptr %v, i64 %not monotonic, align 8 |
|   %rem = and i64 %c, 63 |
|   %shl1 = shl nuw i64 1, %rem |
|   %and = and i64 %0, %shl1 |
|   %tobool.not = icmp eq i64 %and, 0 |
|   br i1 %tobool.not, label %return, label %if.then |
| |
| if.then: ; preds = %entry |
|   %arrayidx = getelementptr inbounds i64, ptr %v, i64 %c |
|   %1 = load i64, ptr %arrayidx, align 8 |
|   br label %return |
| |
| return: ; preds = %entry, %if.then |
|   %retval.0 = phi i64 [ %1, %if.then ], [ 123, %entry ] |
|   ret i64 %retval.0 |
| } |
| |
| ; "mask01" branch variant: the same masked amount feeds both the atomic mask |
| ; and the tested bit — the canonical bit-test-and-reset shape, lowered to a |
| ; single `lock btrq` with a jae on CF. |
| define i64 @atomic_shl1_mask01_and_64_gpr_brnz(ptr %v, i64 %c) nounwind { |
| ; CHECK-LABEL: atomic_shl1_mask01_and_64_gpr_brnz: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: movl %esi, %eax |
| ; CHECK-NEXT: andl $63, %eax |
| ; CHECK-NEXT: lock btrq %rax, (%rdi) |
| ; CHECK-NEXT: jae .LBB46_1 |
| ; CHECK-NEXT: # %bb.2: # %if.then |
| ; CHECK-NEXT: movq (%rdi,%rsi,8), %rax |
| ; CHECK-NEXT: retq |
| ; CHECK-NEXT: .LBB46_1: |
| ; CHECK-NEXT: movl $123, %eax |
| ; CHECK-NEXT: retq |
| entry: |
|   %rem = and i64 %c, 63 |
|   %shl = shl nuw i64 1, %rem |
|   %not = xor i64 %shl, -1 |
|   %0 = atomicrmw and ptr %v, i64 %not monotonic, align 8 |
|   %and = and i64 %0, %shl |
|   %tobool.not = icmp eq i64 %and, 0 |
|   br i1 %tobool.not, label %return, label %if.then |
| |
| if.then: ; preds = %entry |
|   %arrayidx = getelementptr inbounds i64, ptr %v, i64 %c |
|   %1 = load i64, ptr %arrayidx, align 8 |
|   br label %return |
| |
| return: ; preds = %entry, %if.then |
|   %retval.0 = phi i64 [ %1, %if.then ], [ 123, %entry ] |
|   ret i64 %retval.0 |
| } |
| |
| ; BLSI-mask (c & -c) branch variant: no bit index exists, so no bt fold; |
| ; expected codegen builds the mask with negq/andq, clears it via a cmpxchg |
| ; loop, then branches with testq/je on the old value. |
| define i64 @atomic_blsi_and_64_gpr_brnz(ptr %v, i64 %c) nounwind { |
| ; CHECK-LABEL: atomic_blsi_and_64_gpr_brnz: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: movq %rsi, %rcx |
| ; CHECK-NEXT: negq %rcx |
| ; CHECK-NEXT: andq %rsi, %rcx |
| ; CHECK-NEXT: movq %rcx, %rdx |
| ; CHECK-NEXT: notq %rdx |
| ; CHECK-NEXT: movq (%rdi), %rax |
| ; CHECK-NEXT: .p2align 4, 0x90 |
| ; CHECK-NEXT: .LBB47_1: # %atomicrmw.start |
| ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 |
| ; CHECK-NEXT: movq %rax, %r8 |
| ; CHECK-NEXT: andq %rdx, %r8 |
| ; CHECK-NEXT: lock cmpxchgq %r8, (%rdi) |
| ; CHECK-NEXT: jne .LBB47_1 |
| ; CHECK-NEXT: # %bb.2: # %atomicrmw.end |
| ; CHECK-NEXT: testq %rcx, %rax |
| ; CHECK-NEXT: je .LBB47_3 |
| ; CHECK-NEXT: # %bb.4: # %if.then |
| ; CHECK-NEXT: movq (%rdi,%rsi,8), %rax |
| ; CHECK-NEXT: retq |
| ; CHECK-NEXT: .LBB47_3: |
| ; CHECK-NEXT: movl $123, %eax |
| ; CHECK-NEXT: retq |
| entry: |
|   %sub = sub i64 0, %c |
|   %and = and i64 %sub, %c |
|   %not = xor i64 %and, -1 |
|   %0 = atomicrmw and ptr %v, i64 %not monotonic, align 8 |
|   %and3 = and i64 %0, %and |
|   %tobool.not = icmp eq i64 %and3, 0 |
|   br i1 %tobool.not, label %return, label %if.then |
| |
| if.then: ; preds = %entry |
|   %arrayidx = getelementptr inbounds i64, ptr %v, i64 %c |
|   %1 = load i64, ptr %arrayidx, align 8 |
|   br label %return |
| |
| return: ; preds = %entry, %if.then |
|   %retval.0 = phi i64 [ %1, %if.then ], [ 123, %entry ] |
|   ret i64 %retval.0 |
| } |
| |
| ; Constant single-bit xor (mask 16 = bit 4) with a branch on the toggled bit |
| ; of the old value: folds to `lock btcq $4` with setb, no cmpxchg loop. |
| define i64 @atomic_shl1_xor_64_const_br(ptr %v) nounwind { |
| ; CHECK-LABEL: atomic_shl1_xor_64_const_br: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xorl %eax, %eax |
| ; CHECK-NEXT: lock btcq $4, (%rdi) |
| ; CHECK-NEXT: setb %al |
| ; CHECK-NEXT: shlq $4, %rax |
| ; CHECK-NEXT: je .LBB48_1 |
| ; CHECK-NEXT: # %bb.2: # %if.then |
| ; CHECK-NEXT: movq 32(%rdi), %rax |
| ; CHECK-NEXT: retq |
| ; CHECK-NEXT: .LBB48_1: |
| ; CHECK-NEXT: movl $123, %eax |
| ; CHECK-NEXT: retq |
| entry: |
|   %0 = atomicrmw xor ptr %v, i64 16 monotonic, align 8 |
|   %and = and i64 %0, 16 |
|   %tobool.not = icmp eq i64 %and, 0 |
|   br i1 %tobool.not, label %return, label %if.then |
| |
| if.then: ; preds = %entry |
|   %arrayidx = getelementptr inbounds i64, ptr %v, i64 4 |
|   %1 = load i64, ptr %arrayidx, align 8 |
|   br label %return |
| |
| return: ; preds = %entry, %if.then |
|   %retval.0 = phi i64 [ %1, %if.then ], [ 123, %entry ] |
|   ret i64 %retval.0 |
| } |
| |
| ; Xors bit 4 atomically but branches on bit 5 (mask 32) of the old value; the |
| ; mismatch prevents the btc fold, so codegen is a cmpxchg loop with xorq $16, |
| ; then testb $32 on the low byte of the old value. |
| define i64 @atomic_shl1_neq_xor_64_const_br(ptr %v) nounwind { |
| ; CHECK-LABEL: atomic_shl1_neq_xor_64_const_br: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: movq (%rdi), %rax |
| ; CHECK-NEXT: .p2align 4, 0x90 |
| ; CHECK-NEXT: .LBB49_1: # %atomicrmw.start |
| ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 |
| ; CHECK-NEXT: movq %rax, %rcx |
| ; CHECK-NEXT: xorq $16, %rcx |
| ; CHECK-NEXT: lock cmpxchgq %rcx, (%rdi) |
| ; CHECK-NEXT: jne .LBB49_1 |
| ; CHECK-NEXT: # %bb.2: # %atomicrmw.end |
| ; CHECK-NEXT: movl $123, %ecx |
| ; CHECK-NEXT: testb $32, %al |
| ; CHECK-NEXT: je .LBB49_4 |
| ; CHECK-NEXT: # %bb.3: # %if.then |
| ; CHECK-NEXT: movq 32(%rdi), %rcx |
| ; CHECK-NEXT: .LBB49_4: # %return |
| ; CHECK-NEXT: movq %rcx, %rax |
| ; CHECK-NEXT: retq |
| entry: |
|   %0 = atomicrmw xor ptr %v, i64 16 monotonic, align 8 |
|   %and = and i64 %0, 32 |
|   %tobool.not = icmp eq i64 %and, 0 |
|   br i1 %tobool.not, label %return, label %if.then |
| |
| if.then: ; preds = %entry |
|   %arrayidx = getelementptr inbounds i64, ptr %v, i64 4 |
|   %1 = load i64, ptr %arrayidx, align 8 |
|   br label %return |
| |
| return: ; preds = %entry, %if.then |
|   %retval.0 = phi i64 [ %1, %if.then ], [ 123, %entry ] |
|   ret i64 %retval.0 |
| } |
| |
| ; Same as atomic_shl1_xor_64_const_br but the branch polarity is inverted |
| ; (if.then taken when the bit was ZERO): still folds to `lock btcq $4`, with |
| ; the je now leading to the array load. |
| define i64 @atomic_shl1_xor_64_const_brz(ptr %v) nounwind { |
| ; CHECK-LABEL: atomic_shl1_xor_64_const_brz: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xorl %eax, %eax |
| ; CHECK-NEXT: lock btcq $4, (%rdi) |
| ; CHECK-NEXT: setb %al |
| ; CHECK-NEXT: shlq $4, %rax |
| ; CHECK-NEXT: movl $123, %eax |
| ; CHECK-NEXT: je .LBB50_1 |
| ; CHECK-NEXT: # %bb.2: # %return |
| ; CHECK-NEXT: retq |
| ; CHECK-NEXT: .LBB50_1: # %if.then |
| ; CHECK-NEXT: movq 32(%rdi), %rax |
| ; CHECK-NEXT: retq |
| entry: |
|   %0 = atomicrmw xor ptr %v, i64 16 monotonic, align 8 |
|   %and = and i64 16, %0 |
|   %tobool.not = icmp eq i64 %and, 0 |
|   br i1 %tobool.not, label %if.then, label %return |
| |
| if.then: ; preds = %entry |
|   %arrayidx = getelementptr inbounds i64, ptr %v, i64 4 |
|   %1 = load i64, ptr %arrayidx, align 8 |
|   br label %return |
| |
| return: ; preds = %entry, %if.then |
|   %retval.0 = phi i64 [ %1, %if.then ], [ 123, %entry ] |
|   ret i64 %retval.0 |
| } |
| |
| ; Xors bit 4 but branches (inverted polarity: if.then on zero) on bit 5 of |
| ; the old value; no btc fold, so cmpxchg loop + testb $32 with jne skipping |
| ; the if.then load. |
| define i64 @atomic_shl1_neq_xor_64_const_brz(ptr %v) nounwind { |
| ; CHECK-LABEL: atomic_shl1_neq_xor_64_const_brz: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: movq (%rdi), %rax |
| ; CHECK-NEXT: .p2align 4, 0x90 |
| ; CHECK-NEXT: .LBB51_1: # %atomicrmw.start |
| ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 |
| ; CHECK-NEXT: movq %rax, %rcx |
| ; CHECK-NEXT: xorq $16, %rcx |
| ; CHECK-NEXT: lock cmpxchgq %rcx, (%rdi) |
| ; CHECK-NEXT: jne .LBB51_1 |
| ; CHECK-NEXT: # %bb.2: # %atomicrmw.end |
| ; CHECK-NEXT: movl $123, %ecx |
| ; CHECK-NEXT: testb $32, %al |
| ; CHECK-NEXT: jne .LBB51_4 |
| ; CHECK-NEXT: # %bb.3: # %if.then |
| ; CHECK-NEXT: movq 32(%rdi), %rcx |
| ; CHECK-NEXT: .LBB51_4: # %return |
| ; CHECK-NEXT: movq %rcx, %rax |
| ; CHECK-NEXT: retq |
| entry: |
|   %0 = atomicrmw xor ptr %v, i64 16 monotonic, align 8 |
|   %and = and i64 %0, 32 |
|   %tobool.not = icmp eq i64 %and, 0 |
|   br i1 %tobool.not, label %if.then, label %return |
| |
| if.then: ; preds = %entry |
|   %arrayidx = getelementptr inbounds i64, ptr %v, i64 4 |
|   %1 = load i64, ptr %arrayidx, align 8 |
|   br label %return |
| |
| return: ; preds = %entry, %if.then |
|   %retval.0 = phi i64 [ %1, %if.then ], [ 123, %entry ] |
|   ret i64 %retval.0 |
| } |
| |
| ; Constant bit-4 xor, branch taken when the old bit was NONZERO. IR and |
| ; expected codegen are identical in shape to atomic_shl1_xor_64_const_br: |
| ; `lock btcq $4` + setb + je to the constant-return path. |
| define i64 @atomic_shl1_xor_64_const_brnz(ptr %v) nounwind { |
| ; CHECK-LABEL: atomic_shl1_xor_64_const_brnz: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: xorl %eax, %eax |
| ; CHECK-NEXT: lock btcq $4, (%rdi) |
| ; CHECK-NEXT: setb %al |
| ; CHECK-NEXT: shlq $4, %rax |
| ; CHECK-NEXT: je .LBB52_1 |
| ; CHECK-NEXT: # %bb.2: # %if.then |
| ; CHECK-NEXT: movq 32(%rdi), %rax |
| ; CHECK-NEXT: retq |
| ; CHECK-NEXT: .LBB52_1: |
| ; CHECK-NEXT: movl $123, %eax |
| ; CHECK-NEXT: retq |
| entry: |
|   %0 = atomicrmw xor ptr %v, i64 16 monotonic, align 8 |
|   %and = and i64 %0, 16 |
|   %tobool.not = icmp eq i64 %and, 0 |
|   br i1 %tobool.not, label %return, label %if.then |
| |
| if.then: ; preds = %entry |
|   %arrayidx = getelementptr inbounds i64, ptr %v, i64 4 |
|   %1 = load i64, ptr %arrayidx, align 8 |
|   br label %return |
| |
| return: ; preds = %entry, %if.then |
|   %retval.0 = phi i64 [ %1, %if.then ], [ 123, %entry ] |
|   ret i64 %retval.0 |
| } |
| |
| ; Xors bit 4, branches when bit 5 of the old value is nonzero; mismatched |
| ; bits block the btc fold — cmpxchg loop, then testb $32 with je around the |
| ; if.then load. |
| define i64 @atomic_shl1_neq_xor_64_const_brnz(ptr %v) nounwind { |
| ; CHECK-LABEL: atomic_shl1_neq_xor_64_const_brnz: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: movq (%rdi), %rax |
| ; CHECK-NEXT: .p2align 4, 0x90 |
| ; CHECK-NEXT: .LBB53_1: # %atomicrmw.start |
| ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 |
| ; CHECK-NEXT: movq %rax, %rcx |
| ; CHECK-NEXT: xorq $16, %rcx |
| ; CHECK-NEXT: lock cmpxchgq %rcx, (%rdi) |
| ; CHECK-NEXT: jne .LBB53_1 |
| ; CHECK-NEXT: # %bb.2: # %atomicrmw.end |
| ; CHECK-NEXT: movl $123, %ecx |
| ; CHECK-NEXT: testb $32, %al |
| ; CHECK-NEXT: je .LBB53_4 |
| ; CHECK-NEXT: # %bb.3: # %if.then |
| ; CHECK-NEXT: movq 32(%rdi), %rcx |
| ; CHECK-NEXT: .LBB53_4: # %return |
| ; CHECK-NEXT: movq %rcx, %rax |
| ; CHECK-NEXT: retq |
| entry: |
|   %0 = atomicrmw xor ptr %v, i64 16 monotonic, align 8 |
|   %and = and i64 %0, 32 |
|   %tobool.not = icmp eq i64 %and, 0 |
|   br i1 %tobool.not, label %return, label %if.then |
| |
| if.then: ; preds = %entry |
|   %arrayidx = getelementptr inbounds i64, ptr %v, i64 4 |
|   %1 = load i64, ptr %arrayidx, align 8 |
|   br label %return |
| |
| return: ; preds = %entry, %if.then |
|   %retval.0 = phi i64 [ %1, %if.then ], [ 123, %entry ] |
|   ret i64 %retval.0 |
| } |
| |
| ; The operand is `xor 0, -1` (constant-folds to all-ones) fed to atomicrmw |
| ; or; the old value is returned, so codegen shown is a cmpxchg loop doing |
| ; orq $-1 (via %rcx). NOTE(review): despite the "and_with_not_arg" name, the |
| ; IR performs an `or` — presumably intentional for this lowering test; the |
| ; name/op mismatch is worth confirming against the companion 32-bit tests. |
| define i64 @atomic_and_with_not_arg(ptr %v, i64 %c) nounwind { |
| ; CHECK-LABEL: atomic_and_with_not_arg: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: movq $-1, %rcx |
| ; CHECK-NEXT: movq (%rdi), %rax |
| ; CHECK-NEXT: .p2align 4, 0x90 |
| ; CHECK-NEXT: .LBB54_1: # %atomicrmw.start |
| ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 |
| ; CHECK-NEXT: movq %rax, %rdx |
| ; CHECK-NEXT: orq %rcx, %rdx |
| ; CHECK-NEXT: lock cmpxchgq %rdx, (%rdi) |
| ; CHECK-NEXT: jne .LBB54_1 |
| ; CHECK-NEXT: # %bb.2: # %atomicrmw.end |
| ; CHECK-NEXT: retq |
| entry: |
|   %0 = xor i64 0, -1 |
|   %1 = atomicrmw or ptr %v, i64 %0 monotonic, align 8 |
|   ret i64 %1 |
| } |