; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=X64

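; An atomicrmw xor whose mask is just the sign bit can be lowered as an add:
; the two operations agree on every input because the carry out of the top
; bit is discarded, e.g. for i32:
;   0x7FFFFFFF ^ 0x80000000 = 0xFFFFFFFF = 0x7FFFFFFF + 0x80000000 (mod 2^32)
; When the fetched value is used, this enables 'lock xadd'; when it is
; unused, a memory-destination 'lock xor' is already optimal.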
define void @xor32_signbit_unused(ptr %p) nounwind {
; X86-LABEL: xor32_signbit_unused:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: lock xorl $-2147483648, (%eax) # imm = 0x80000000
; X86-NEXT: retl
;
; X64-LABEL: xor32_signbit_unused:
; X64: # %bb.0:
; X64-NEXT: lock xorl $-2147483648, (%rdi) # imm = 0x80000000
; X64-NEXT: retq
  %r = atomicrmw xor ptr %p, i32 2147483648 monotonic
  ret void
}

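; i128 is not handled inline here: i686 expands to a loop around the
; __atomic_compare_exchange libcall (the xor still becomes an add, note the
; 'addl $-2147483648' on the high dword), and x86-64 calls
; __atomic_fetch_xor_16.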
define i128 @xor128_signbit_used(ptr %p) nounwind {
; X86-LABEL: xor128_signbit_used:
; X86: # %bb.0:
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: andl $-16, %esp
; X86-NEXT: subl $48, %esp
; X86-NEXT: movl 12(%ebp), %edi
; X86-NEXT: movl 12(%edi), %ecx
; X86-NEXT: movl 8(%edi), %edx
; X86-NEXT: movl (%edi), %ebx
; X86-NEXT: movl 4(%edi), %esi
; X86-NEXT: .p2align 4
; X86-NEXT: .LBB1_1: # %atomicrmw.start
; X86-NEXT: # =>This Inner Loop Header: Depth=1
; X86-NEXT: movl %ebx, (%esp)
; X86-NEXT: movl %esi, {{[0-9]+}}(%esp)
; X86-NEXT: movl %edx, {{[0-9]+}}(%esp)
; X86-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-NEXT: addl $-2147483648, %ecx # imm = 0x80000000
; X86-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-NEXT: movl %edx, {{[0-9]+}}(%esp)
; X86-NEXT: movl %esi, {{[0-9]+}}(%esp)
; X86-NEXT: movl %ebx, {{[0-9]+}}(%esp)
; X86-NEXT: pushl $0
; X86-NEXT: pushl $0
; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-NEXT: pushl %eax
; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-NEXT: pushl %eax
; X86-NEXT: pushl %edi
; X86-NEXT: pushl $16
; X86-NEXT: calll __atomic_compare_exchange@PLT
; X86-NEXT: addl $24, %esp
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl (%esp), %ebx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: testb %al, %al
; X86-NEXT: je .LBB1_1
; X86-NEXT: # %bb.2: # %atomicrmw.end
; X86-NEXT: movl 8(%ebp), %eax
; X86-NEXT: movl %ebx, (%eax)
; X86-NEXT: movl %esi, 4(%eax)
; X86-NEXT: movl %edx, 8(%eax)
; X86-NEXT: movl %ecx, 12(%eax)
; X86-NEXT: leal -12(%ebp), %esp
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: popl %ebx
; X86-NEXT: popl %ebp
; X86-NEXT: retl $4
;
; X64-LABEL: xor128_signbit_used:
; X64: # %bb.0:
; X64-NEXT: pushq %rax
; X64-NEXT: movabsq $-9223372036854775808, %rdx # imm = 0x8000000000000000
; X64-NEXT: xorl %esi, %esi
; X64-NEXT: xorl %ecx, %ecx
; X64-NEXT: callq __atomic_fetch_xor_16@PLT
; X64-NEXT: popq %rcx
; X64-NEXT: retq
  %r = atomicrmw xor ptr %p, i128 170141183460469231731687303715884105728 monotonic
  ret i128 %r
}

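; i64 sign-bit xor: i686 uses a cmpxchg8b loop, rewriting the xor as an add
; on the high dword ('leal -2147483648(%edx)'); x86-64 folds the whole
; operation to 'lock xaddq', which returns the old value.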
define i64 @xor64_signbit_used(ptr %p) nounwind {
; X86-LABEL: xor64_signbit_used:
; X86: # %bb.0:
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: movl (%esi), %eax
; X86-NEXT: movl 4(%esi), %edx
; X86-NEXT: .p2align 4
; X86-NEXT: .LBB2_1: # %atomicrmw.start
; X86-NEXT: # =>This Inner Loop Header: Depth=1
; X86-NEXT: leal -2147483648(%edx), %ecx
; X86-NEXT: movl %eax, %ebx
; X86-NEXT: lock cmpxchg8b (%esi)
; X86-NEXT: jne .LBB2_1
; X86-NEXT: # %bb.2: # %atomicrmw.end
; X86-NEXT: popl %esi
; X86-NEXT: popl %ebx
; X86-NEXT: retl
;
; X64-LABEL: xor64_signbit_used:
; X64: # %bb.0:
; X64-NEXT: movabsq $-9223372036854775808, %rax # imm = 0x8000000000000000
; X64-NEXT: lock xaddq %rax, (%rdi)
; X64-NEXT: retq
  %r = atomicrmw xor ptr %p, i64 9223372036854775808 monotonic
  ret i64 %r
}

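; The fetched value is used, so the sign-bit xor becomes an add and both
; targets emit 'lock xaddl' instead of a cmpxchg loop.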
define i32 @xor32_signbit_used(ptr %p) nounwind {
; X86-LABEL: xor32_signbit_used:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl $-2147483648, %eax # imm = 0x80000000
; X86-NEXT: lock xaddl %eax, (%ecx)
; X86-NEXT: retl
;
; X64-LABEL: xor32_signbit_used:
; X64: # %bb.0:
; X64-NEXT: movl $-2147483648, %eax # imm = 0x80000000
; X64-NEXT: lock xaddl %eax, (%rdi)
; X64-NEXT: retq
  %r = atomicrmw xor ptr %p, i32 2147483648 monotonic
  ret i32 %r
}

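; Same transform at i16: 0x8000 is the sign bit, so 'lock xaddw' suffices.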
define i16 @xor16_signbit_used(ptr %p) nounwind {
; X86-LABEL: xor16_signbit_used:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movw $-32768, %ax # imm = 0x8000
; X86-NEXT: lock xaddw %ax, (%ecx)
; X86-NEXT: retl
;
; X64-LABEL: xor16_signbit_used:
; X64: # %bb.0:
; X64-NEXT: movw $-32768, %ax # imm = 0x8000
; X64-NEXT: lock xaddw %ax, (%rdi)
; X64-NEXT: retq
  %r = atomicrmw xor ptr %p, i16 32768 monotonic
  ret i16 %r
}

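; Same transform at i8: 0x80 is the sign bit, so 'lock xaddb' suffices.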
define i8 @xor8_signbit_used(ptr %p) nounwind {
; X86-LABEL: xor8_signbit_used:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movb $-128, %al
; X86-NEXT: lock xaddb %al, (%ecx)
; X86-NEXT: retl
;
; X64-LABEL: xor8_signbit_used:
; X64: # %bb.0:
; X64-NEXT: movb $-128, %al
; X64-NEXT: lock xaddb %al, (%rdi)
; X64-NEXT: retq
  %r = atomicrmw xor ptr %p, i8 128 monotonic
  ret i8 %r
}

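; Negative test: 0x80000001 sets a bit besides the sign bit, so the xor is
; not equivalent to an add and a 'lock cmpxchg' loop is emitted instead.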
define i32 @xor32_not_signbit_used(ptr %p) nounwind {
; X86-LABEL: xor32_not_signbit_used:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl (%ecx), %eax
; X86-NEXT: .p2align 4
; X86-NEXT: .LBB6_1: # %atomicrmw.start
; X86-NEXT: # =>This Inner Loop Header: Depth=1
; X86-NEXT: movl %eax, %edx
; X86-NEXT: xorl $-2147483647, %edx # imm = 0x80000001
; X86-NEXT: lock cmpxchgl %edx, (%ecx)
; X86-NEXT: jne .LBB6_1
; X86-NEXT: # %bb.2: # %atomicrmw.end
; X86-NEXT: retl
;
; X64-LABEL: xor32_not_signbit_used:
; X64: # %bb.0:
; X64-NEXT: movl (%rdi), %eax
; X64-NEXT: .p2align 4
; X64-NEXT: .LBB6_1: # %atomicrmw.start
; X64-NEXT: # =>This Inner Loop Header: Depth=1
; X64-NEXT: movl %eax, %ecx
; X64-NEXT: xorl $-2147483647, %ecx # imm = 0x80000001
; X64-NEXT: lock cmpxchgl %ecx, (%rdi)
; X64-NEXT: jne .LBB6_1
; X64-NEXT: # %bb.2: # %atomicrmw.end
; X64-NEXT: retq
  %r = atomicrmw xor ptr %p, i32 2147483649 monotonic
  ret i32 %r
}