; blob: c9fd90bb8c3478ffab88677c904d5ee5e964ce50 [file] [log] [blame]
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=csky -verify-machineinstrs -csky-no-aliases -mattr=+2e3 < %s \
; RUN: | FileCheck -check-prefix=CSKY %s
; i8 atomicrmw xchg, monotonic: lowered to an __atomic_exchange_1 libcall;
; a2 = 0 is the C11 memory-order argument (relaxed).
define i8 @atomicrmw_xchg_i8_monotonic(i8* %a, i8 %b) nounwind {
; CSKY-LABEL: atomicrmw_xchg_i8_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 0
; CSKY-NEXT: jsri32 [.LCPI0_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI0_0:
; CSKY-NEXT: .long __atomic_exchange_1
%1 = atomicrmw xchg i8* %a, i8 %b monotonic
ret i8 %1
}
; i8 atomicrmw xchg, acquire: lowered to an __atomic_exchange_1 libcall;
; a2 = 2 is the C11 memory-order argument (acquire).
define i8 @atomicrmw_xchg_i8_acquire(i8* %a, i8 %b) nounwind {
; CSKY-LABEL: atomicrmw_xchg_i8_acquire:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 2
; CSKY-NEXT: jsri32 [.LCPI1_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI1_0:
; CSKY-NEXT: .long __atomic_exchange_1
%1 = atomicrmw xchg i8* %a, i8 %b acquire
ret i8 %1
}
; i8 atomicrmw xchg, release: lowered to an __atomic_exchange_1 libcall;
; a2 = 3 is the C11 memory-order argument (release).
define i8 @atomicrmw_xchg_i8_release(i8* %a, i8 %b) nounwind {
; CSKY-LABEL: atomicrmw_xchg_i8_release:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 3
; CSKY-NEXT: jsri32 [.LCPI2_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI2_0:
; CSKY-NEXT: .long __atomic_exchange_1
%1 = atomicrmw xchg i8* %a, i8 %b release
ret i8 %1
}
; i8 atomicrmw xchg, acq_rel: lowered to an __atomic_exchange_1 libcall;
; a2 = 4 is the C11 memory-order argument (acq_rel).
define i8 @atomicrmw_xchg_i8_acq_rel(i8* %a, i8 %b) nounwind {
; CSKY-LABEL: atomicrmw_xchg_i8_acq_rel:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 4
; CSKY-NEXT: jsri32 [.LCPI3_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI3_0:
; CSKY-NEXT: .long __atomic_exchange_1
%1 = atomicrmw xchg i8* %a, i8 %b acq_rel
ret i8 %1
}
; i8 atomicrmw xchg, seq_cst: lowered to an __atomic_exchange_1 libcall;
; a2 = 5 is the C11 memory-order argument (seq_cst).
define i8 @atomicrmw_xchg_i8_seq_cst(i8* %a, i8 %b) nounwind {
; CSKY-LABEL: atomicrmw_xchg_i8_seq_cst:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 5
; CSKY-NEXT: jsri32 [.LCPI4_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI4_0:
; CSKY-NEXT: .long __atomic_exchange_1
%1 = atomicrmw xchg i8* %a, i8 %b seq_cst
ret i8 %1
}
; i8 atomicrmw add, monotonic: lowered to a __atomic_fetch_add_1 libcall;
; a2 = 0 is the C11 memory-order argument (relaxed).
define i8 @atomicrmw_add_i8_monotonic(i8 *%a, i8 %b) nounwind {
; CSKY-LABEL: atomicrmw_add_i8_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 0
; CSKY-NEXT: jsri32 [.LCPI5_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI5_0:
; CSKY-NEXT: .long __atomic_fetch_add_1
%1 = atomicrmw add i8* %a, i8 %b monotonic
ret i8 %1
}
; i8 atomicrmw add, acquire: lowered to a __atomic_fetch_add_1 libcall;
; a2 = 2 is the C11 memory-order argument (acquire).
define i8 @atomicrmw_add_i8_acquire(i8 *%a, i8 %b) nounwind {
; CSKY-LABEL: atomicrmw_add_i8_acquire:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 2
; CSKY-NEXT: jsri32 [.LCPI6_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI6_0:
; CSKY-NEXT: .long __atomic_fetch_add_1
%1 = atomicrmw add i8* %a, i8 %b acquire
ret i8 %1
}
; i8 atomicrmw add, release: lowered to a __atomic_fetch_add_1 libcall;
; a2 = 3 is the C11 memory-order argument (release).
define i8 @atomicrmw_add_i8_release(i8 *%a, i8 %b) nounwind {
; CSKY-LABEL: atomicrmw_add_i8_release:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 3
; CSKY-NEXT: jsri32 [.LCPI7_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI7_0:
; CSKY-NEXT: .long __atomic_fetch_add_1
%1 = atomicrmw add i8* %a, i8 %b release
ret i8 %1
}
; i8 atomicrmw add, acq_rel: lowered to a __atomic_fetch_add_1 libcall;
; a2 = 4 is the C11 memory-order argument (acq_rel).
define i8 @atomicrmw_add_i8_acq_rel(i8 *%a, i8 %b) nounwind {
; CSKY-LABEL: atomicrmw_add_i8_acq_rel:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 4
; CSKY-NEXT: jsri32 [.LCPI8_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI8_0:
; CSKY-NEXT: .long __atomic_fetch_add_1
%1 = atomicrmw add i8* %a, i8 %b acq_rel
ret i8 %1
}
; i8 atomicrmw add, seq_cst: lowered to a __atomic_fetch_add_1 libcall;
; a2 = 5 is the C11 memory-order argument (seq_cst).
define i8 @atomicrmw_add_i8_seq_cst(i8 *%a, i8 %b) nounwind {
; CSKY-LABEL: atomicrmw_add_i8_seq_cst:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 5
; CSKY-NEXT: jsri32 [.LCPI9_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI9_0:
; CSKY-NEXT: .long __atomic_fetch_add_1
%1 = atomicrmw add i8* %a, i8 %b seq_cst
ret i8 %1
}
; i8 atomicrmw sub, monotonic: lowered to a __atomic_fetch_sub_1 libcall;
; a2 = 0 is the C11 memory-order argument (relaxed).
define i8 @atomicrmw_sub_i8_monotonic(i8* %a, i8 %b) nounwind {
; CSKY-LABEL: atomicrmw_sub_i8_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 0
; CSKY-NEXT: jsri32 [.LCPI10_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI10_0:
; CSKY-NEXT: .long __atomic_fetch_sub_1
%1 = atomicrmw sub i8* %a, i8 %b monotonic
ret i8 %1
}
; i8 atomicrmw sub, acquire: lowered to a __atomic_fetch_sub_1 libcall;
; a2 = 2 is the C11 memory-order argument (acquire).
define i8 @atomicrmw_sub_i8_acquire(i8* %a, i8 %b) nounwind {
; CSKY-LABEL: atomicrmw_sub_i8_acquire:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 2
; CSKY-NEXT: jsri32 [.LCPI11_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI11_0:
; CSKY-NEXT: .long __atomic_fetch_sub_1
%1 = atomicrmw sub i8* %a, i8 %b acquire
ret i8 %1
}
; i8 atomicrmw sub, release: lowered to a __atomic_fetch_sub_1 libcall;
; a2 = 3 is the C11 memory-order argument (release).
define i8 @atomicrmw_sub_i8_release(i8* %a, i8 %b) nounwind {
; CSKY-LABEL: atomicrmw_sub_i8_release:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 3
; CSKY-NEXT: jsri32 [.LCPI12_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI12_0:
; CSKY-NEXT: .long __atomic_fetch_sub_1
%1 = atomicrmw sub i8* %a, i8 %b release
ret i8 %1
}
; i8 atomicrmw sub, acq_rel: lowered to a __atomic_fetch_sub_1 libcall;
; a2 = 4 is the C11 memory-order argument (acq_rel).
define i8 @atomicrmw_sub_i8_acq_rel(i8* %a, i8 %b) nounwind {
; CSKY-LABEL: atomicrmw_sub_i8_acq_rel:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 4
; CSKY-NEXT: jsri32 [.LCPI13_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI13_0:
; CSKY-NEXT: .long __atomic_fetch_sub_1
%1 = atomicrmw sub i8* %a, i8 %b acq_rel
ret i8 %1
}
; i8 atomicrmw sub, seq_cst: lowered to a __atomic_fetch_sub_1 libcall;
; a2 = 5 is the C11 memory-order argument (seq_cst).
define i8 @atomicrmw_sub_i8_seq_cst(i8* %a, i8 %b) nounwind {
; CSKY-LABEL: atomicrmw_sub_i8_seq_cst:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 5
; CSKY-NEXT: jsri32 [.LCPI14_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI14_0:
; CSKY-NEXT: .long __atomic_fetch_sub_1
%1 = atomicrmw sub i8* %a, i8 %b seq_cst
ret i8 %1
}
; i8 atomicrmw and, monotonic: lowered to a __atomic_fetch_and_1 libcall;
; a2 = 0 is the C11 memory-order argument (relaxed).
define i8 @atomicrmw_and_i8_monotonic(i8 *%a, i8 %b) nounwind {
; CSKY-LABEL: atomicrmw_and_i8_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 0
; CSKY-NEXT: jsri32 [.LCPI15_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI15_0:
; CSKY-NEXT: .long __atomic_fetch_and_1
%1 = atomicrmw and i8* %a, i8 %b monotonic
ret i8 %1
}
; i8 atomicrmw and, acquire: lowered to a __atomic_fetch_and_1 libcall;
; a2 = 2 is the C11 memory-order argument (acquire).
define i8 @atomicrmw_and_i8_acquire(i8 *%a, i8 %b) nounwind {
; CSKY-LABEL: atomicrmw_and_i8_acquire:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 2
; CSKY-NEXT: jsri32 [.LCPI16_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI16_0:
; CSKY-NEXT: .long __atomic_fetch_and_1
%1 = atomicrmw and i8* %a, i8 %b acquire
ret i8 %1
}
; i8 atomicrmw and, release: lowered to a __atomic_fetch_and_1 libcall;
; a2 = 3 is the C11 memory-order argument (release).
define i8 @atomicrmw_and_i8_release(i8 *%a, i8 %b) nounwind {
; CSKY-LABEL: atomicrmw_and_i8_release:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 3
; CSKY-NEXT: jsri32 [.LCPI17_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI17_0:
; CSKY-NEXT: .long __atomic_fetch_and_1
%1 = atomicrmw and i8* %a, i8 %b release
ret i8 %1
}
; i8 atomicrmw and, acq_rel: lowered to a __atomic_fetch_and_1 libcall;
; a2 = 4 is the C11 memory-order argument (acq_rel).
define i8 @atomicrmw_and_i8_acq_rel(i8 *%a, i8 %b) nounwind {
; CSKY-LABEL: atomicrmw_and_i8_acq_rel:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 4
; CSKY-NEXT: jsri32 [.LCPI18_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI18_0:
; CSKY-NEXT: .long __atomic_fetch_and_1
%1 = atomicrmw and i8* %a, i8 %b acq_rel
ret i8 %1
}
; i8 atomicrmw and, seq_cst: lowered to a __atomic_fetch_and_1 libcall;
; a2 = 5 is the C11 memory-order argument (seq_cst).
define i8 @atomicrmw_and_i8_seq_cst(i8 *%a, i8 %b) nounwind {
; CSKY-LABEL: atomicrmw_and_i8_seq_cst:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 5
; CSKY-NEXT: jsri32 [.LCPI19_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI19_0:
; CSKY-NEXT: .long __atomic_fetch_and_1
%1 = atomicrmw and i8* %a, i8 %b seq_cst
ret i8 %1
}
; i8 atomicrmw nand, monotonic: lowered to a __atomic_fetch_nand_1 libcall;
; a2 = 0 is the C11 memory-order argument (relaxed).
define i8 @atomicrmw_nand_i8_monotonic(i8* %a, i8 %b) nounwind {
; CSKY-LABEL: atomicrmw_nand_i8_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 0
; CSKY-NEXT: jsri32 [.LCPI20_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI20_0:
; CSKY-NEXT: .long __atomic_fetch_nand_1
%1 = atomicrmw nand i8* %a, i8 %b monotonic
ret i8 %1
}
; i8 atomicrmw nand, acquire: lowered to a __atomic_fetch_nand_1 libcall;
; a2 = 2 is the C11 memory-order argument (acquire).
define i8 @atomicrmw_nand_i8_acquire(i8* %a, i8 %b) nounwind {
; CSKY-LABEL: atomicrmw_nand_i8_acquire:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 2
; CSKY-NEXT: jsri32 [.LCPI21_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI21_0:
; CSKY-NEXT: .long __atomic_fetch_nand_1
%1 = atomicrmw nand i8* %a, i8 %b acquire
ret i8 %1
}
; i8 atomicrmw nand, release: lowered to a __atomic_fetch_nand_1 libcall;
; a2 = 3 is the C11 memory-order argument (release).
define i8 @atomicrmw_nand_i8_release(i8* %a, i8 %b) nounwind {
; CSKY-LABEL: atomicrmw_nand_i8_release:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 3
; CSKY-NEXT: jsri32 [.LCPI22_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI22_0:
; CSKY-NEXT: .long __atomic_fetch_nand_1
%1 = atomicrmw nand i8* %a, i8 %b release
ret i8 %1
}
; i8 atomicrmw nand, acq_rel: lowered to a __atomic_fetch_nand_1 libcall;
; a2 = 4 is the C11 memory-order argument (acq_rel).
define i8 @atomicrmw_nand_i8_acq_rel(i8* %a, i8 %b) nounwind {
; CSKY-LABEL: atomicrmw_nand_i8_acq_rel:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 4
; CSKY-NEXT: jsri32 [.LCPI23_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI23_0:
; CSKY-NEXT: .long __atomic_fetch_nand_1
%1 = atomicrmw nand i8* %a, i8 %b acq_rel
ret i8 %1
}
; i8 atomicrmw nand, seq_cst: lowered to a __atomic_fetch_nand_1 libcall;
; a2 = 5 is the C11 memory-order argument (seq_cst).
define i8 @atomicrmw_nand_i8_seq_cst(i8* %a, i8 %b) nounwind {
; CSKY-LABEL: atomicrmw_nand_i8_seq_cst:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 5
; CSKY-NEXT: jsri32 [.LCPI24_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI24_0:
; CSKY-NEXT: .long __atomic_fetch_nand_1
%1 = atomicrmw nand i8* %a, i8 %b seq_cst
ret i8 %1
}
; i8 atomicrmw or, monotonic: lowered to a __atomic_fetch_or_1 libcall;
; a2 = 0 is the C11 memory-order argument (relaxed).
define i8 @atomicrmw_or_i8_monotonic(i8 *%a, i8 %b) nounwind {
; CSKY-LABEL: atomicrmw_or_i8_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 0
; CSKY-NEXT: jsri32 [.LCPI25_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI25_0:
; CSKY-NEXT: .long __atomic_fetch_or_1
%1 = atomicrmw or i8* %a, i8 %b monotonic
ret i8 %1
}
; i8 atomicrmw or, acquire: lowered to a __atomic_fetch_or_1 libcall;
; a2 = 2 is the C11 memory-order argument (acquire).
define i8 @atomicrmw_or_i8_acquire(i8 *%a, i8 %b) nounwind {
; CSKY-LABEL: atomicrmw_or_i8_acquire:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 2
; CSKY-NEXT: jsri32 [.LCPI26_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI26_0:
; CSKY-NEXT: .long __atomic_fetch_or_1
%1 = atomicrmw or i8* %a, i8 %b acquire
ret i8 %1
}
; i8 atomicrmw or, release: lowered to a __atomic_fetch_or_1 libcall;
; a2 = 3 is the C11 memory-order argument (release).
define i8 @atomicrmw_or_i8_release(i8 *%a, i8 %b) nounwind {
; CSKY-LABEL: atomicrmw_or_i8_release:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 3
; CSKY-NEXT: jsri32 [.LCPI27_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI27_0:
; CSKY-NEXT: .long __atomic_fetch_or_1
%1 = atomicrmw or i8* %a, i8 %b release
ret i8 %1
}
; i8 atomicrmw or, acq_rel: lowered to a __atomic_fetch_or_1 libcall;
; a2 = 4 is the C11 memory-order argument (acq_rel).
define i8 @atomicrmw_or_i8_acq_rel(i8 *%a, i8 %b) nounwind {
; CSKY-LABEL: atomicrmw_or_i8_acq_rel:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 4
; CSKY-NEXT: jsri32 [.LCPI28_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI28_0:
; CSKY-NEXT: .long __atomic_fetch_or_1
%1 = atomicrmw or i8* %a, i8 %b acq_rel
ret i8 %1
}
; i8 atomicrmw or, seq_cst: lowered to a __atomic_fetch_or_1 libcall;
; a2 = 5 is the C11 memory-order argument (seq_cst).
define i8 @atomicrmw_or_i8_seq_cst(i8 *%a, i8 %b) nounwind {
; CSKY-LABEL: atomicrmw_or_i8_seq_cst:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 5
; CSKY-NEXT: jsri32 [.LCPI29_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI29_0:
; CSKY-NEXT: .long __atomic_fetch_or_1
%1 = atomicrmw or i8* %a, i8 %b seq_cst
ret i8 %1
}
; i8 atomicrmw xor, monotonic: lowered to a __atomic_fetch_xor_1 libcall;
; a2 = 0 is the C11 memory-order argument (relaxed).
define i8 @atomicrmw_xor_i8_monotonic(i8 *%a, i8 %b) nounwind {
; CSKY-LABEL: atomicrmw_xor_i8_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 0
; CSKY-NEXT: jsri32 [.LCPI30_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI30_0:
; CSKY-NEXT: .long __atomic_fetch_xor_1
%1 = atomicrmw xor i8* %a, i8 %b monotonic
ret i8 %1
}
; i8 atomicrmw xor, acquire: lowered to a __atomic_fetch_xor_1 libcall;
; a2 = 2 is the C11 memory-order argument (acquire).
define i8 @atomicrmw_xor_i8_acquire(i8 *%a, i8 %b) nounwind {
; CSKY-LABEL: atomicrmw_xor_i8_acquire:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 2
; CSKY-NEXT: jsri32 [.LCPI31_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI31_0:
; CSKY-NEXT: .long __atomic_fetch_xor_1
%1 = atomicrmw xor i8* %a, i8 %b acquire
ret i8 %1
}
; i8 atomicrmw xor, release: lowered to a __atomic_fetch_xor_1 libcall;
; a2 = 3 is the C11 memory-order argument (release).
define i8 @atomicrmw_xor_i8_release(i8 *%a, i8 %b) nounwind {
; CSKY-LABEL: atomicrmw_xor_i8_release:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 3
; CSKY-NEXT: jsri32 [.LCPI32_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI32_0:
; CSKY-NEXT: .long __atomic_fetch_xor_1
%1 = atomicrmw xor i8* %a, i8 %b release
ret i8 %1
}
; i8 atomicrmw xor, acq_rel: lowered to a __atomic_fetch_xor_1 libcall;
; a2 = 4 is the C11 memory-order argument (acq_rel).
define i8 @atomicrmw_xor_i8_acq_rel(i8 *%a, i8 %b) nounwind {
; CSKY-LABEL: atomicrmw_xor_i8_acq_rel:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 4
; CSKY-NEXT: jsri32 [.LCPI33_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI33_0:
; CSKY-NEXT: .long __atomic_fetch_xor_1
%1 = atomicrmw xor i8* %a, i8 %b acq_rel
ret i8 %1
}
; i8 atomicrmw xor, seq_cst: lowered to a __atomic_fetch_xor_1 libcall;
; a2 = 5 is the C11 memory-order argument (seq_cst).
define i8 @atomicrmw_xor_i8_seq_cst(i8 *%a, i8 %b) nounwind {
; CSKY-LABEL: atomicrmw_xor_i8_seq_cst:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 5
; CSKY-NEXT: jsri32 [.LCPI34_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI34_0:
; CSKY-NEXT: .long __atomic_fetch_xor_1
%1 = atomicrmw xor i8* %a, i8 %b seq_cst
ret i8 %1
}
; i8 atomicrmw max, monotonic: no fetch_max libcall, so this expands into a
; compare-exchange loop around __atomic_compare_exchange_1. Signed compare via
; sextb16/cmplt16; movt32 keeps the loaded value when it is the larger.
; a3 = success order (0), l3 (stored to the stack slot) = failure order (0).
define i8 @atomicrmw_max_i8_monotonic(i8 *%a, i8 %b) nounwind {
; CSKY-LABEL: atomicrmw_max_i8_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 20
; CSKY-NEXT: st16.w l3, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: mov16 l0, a1
; CSKY-NEXT: mov16 l1, a0
; CSKY-NEXT: ld16.b a0, (a0, 0)
; CSKY-NEXT: sextb16 l2, a1
; CSKY-NEXT: movi16 l3, 0
; CSKY-NEXT: .LBB35_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: sextb16 a1, a0
; CSKY-NEXT: cmplt16 l2, a1
; CSKY-NEXT: mov16 a2, l0
; CSKY-NEXT: movt32 a2, a0
; CSKY-NEXT: st32.b a0, (sp, 7)
; CSKY-NEXT: st16.w l3, (sp, 0)
; CSKY-NEXT: mov16 a0, l1
; CSKY-NEXT: addi32 a1, sp, 7
; CSKY-NEXT: movi16 a3, 0
; CSKY-NEXT: jsri32 [.LCPI35_0]
; CSKY-NEXT: mov16 a1, a0
; CSKY-NEXT: ld32.b a0, (sp, 7)
; CSKY-NEXT: bez32 a1, .LBB35_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 20
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI35_0:
; CSKY-NEXT: .long __atomic_compare_exchange_1
%1 = atomicrmw max i8* %a, i8 %b monotonic
ret i8 %1
}
; i8 atomicrmw max, acquire: expands into a compare-exchange loop around
; __atomic_compare_exchange_1. Signed compare via sextb16/cmplt16; movt32
; keeps the loaded value when it is the larger.
; a3 = success order (2), l3 (stored to the stack slot) = failure order (2).
define i8 @atomicrmw_max_i8_acquire(i8 *%a, i8 %b) nounwind {
; CSKY-LABEL: atomicrmw_max_i8_acquire:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 20
; CSKY-NEXT: st16.w l3, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: mov16 l0, a1
; CSKY-NEXT: mov16 l1, a0
; CSKY-NEXT: ld16.b a0, (a0, 0)
; CSKY-NEXT: sextb16 l2, a1
; CSKY-NEXT: movi16 l3, 2
; CSKY-NEXT: .LBB36_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: sextb16 a1, a0
; CSKY-NEXT: cmplt16 l2, a1
; CSKY-NEXT: mov16 a2, l0
; CSKY-NEXT: movt32 a2, a0
; CSKY-NEXT: st32.b a0, (sp, 7)
; CSKY-NEXT: st16.w l3, (sp, 0)
; CSKY-NEXT: mov16 a0, l1
; CSKY-NEXT: addi32 a1, sp, 7
; CSKY-NEXT: movi16 a3, 2
; CSKY-NEXT: jsri32 [.LCPI36_0]
; CSKY-NEXT: mov16 a1, a0
; CSKY-NEXT: ld32.b a0, (sp, 7)
; CSKY-NEXT: bez32 a1, .LBB36_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 20
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI36_0:
; CSKY-NEXT: .long __atomic_compare_exchange_1
%1 = atomicrmw max i8* %a, i8 %b acquire
ret i8 %1
}
; i8 atomicrmw max, release: expands into a compare-exchange loop around
; __atomic_compare_exchange_1. Signed compare via sextb16/cmplt16; movt32
; keeps the loaded value when it is the larger. a3 = success order (3);
; l3 = failure order (0) — failure ordering cannot be release.
define i8 @atomicrmw_max_i8_release(i8 *%a, i8 %b) nounwind {
; CSKY-LABEL: atomicrmw_max_i8_release:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 20
; CSKY-NEXT: st16.w l3, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: mov16 l0, a1
; CSKY-NEXT: mov16 l1, a0
; CSKY-NEXT: ld16.b a0, (a0, 0)
; CSKY-NEXT: sextb16 l2, a1
; CSKY-NEXT: movi16 l3, 0
; CSKY-NEXT: .LBB37_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: sextb16 a1, a0
; CSKY-NEXT: cmplt16 l2, a1
; CSKY-NEXT: mov16 a2, l0
; CSKY-NEXT: movt32 a2, a0
; CSKY-NEXT: st32.b a0, (sp, 7)
; CSKY-NEXT: st16.w l3, (sp, 0)
; CSKY-NEXT: mov16 a0, l1
; CSKY-NEXT: addi32 a1, sp, 7
; CSKY-NEXT: movi16 a3, 3
; CSKY-NEXT: jsri32 [.LCPI37_0]
; CSKY-NEXT: mov16 a1, a0
; CSKY-NEXT: ld32.b a0, (sp, 7)
; CSKY-NEXT: bez32 a1, .LBB37_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 20
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI37_0:
; CSKY-NEXT: .long __atomic_compare_exchange_1
%1 = atomicrmw max i8* %a, i8 %b release
ret i8 %1
}
; i8 atomicrmw max, acq_rel: expands into a compare-exchange loop around
; __atomic_compare_exchange_1. Signed compare via sextb16/cmplt16; movt32
; keeps the loaded value when it is the larger. a3 = success order (4);
; l3 = failure order (2, acquire) — failure ordering cannot include release.
define i8 @atomicrmw_max_i8_acq_rel(i8 *%a, i8 %b) nounwind {
; CSKY-LABEL: atomicrmw_max_i8_acq_rel:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 20
; CSKY-NEXT: st16.w l3, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: mov16 l0, a1
; CSKY-NEXT: mov16 l1, a0
; CSKY-NEXT: ld16.b a0, (a0, 0)
; CSKY-NEXT: sextb16 l2, a1
; CSKY-NEXT: movi16 l3, 2
; CSKY-NEXT: .LBB38_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: sextb16 a1, a0
; CSKY-NEXT: cmplt16 l2, a1
; CSKY-NEXT: mov16 a2, l0
; CSKY-NEXT: movt32 a2, a0
; CSKY-NEXT: st32.b a0, (sp, 7)
; CSKY-NEXT: st16.w l3, (sp, 0)
; CSKY-NEXT: mov16 a0, l1
; CSKY-NEXT: addi32 a1, sp, 7
; CSKY-NEXT: movi16 a3, 4
; CSKY-NEXT: jsri32 [.LCPI38_0]
; CSKY-NEXT: mov16 a1, a0
; CSKY-NEXT: ld32.b a0, (sp, 7)
; CSKY-NEXT: bez32 a1, .LBB38_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 20
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI38_0:
; CSKY-NEXT: .long __atomic_compare_exchange_1
%1 = atomicrmw max i8* %a, i8 %b acq_rel
ret i8 %1
}
; i8 atomicrmw max, seq_cst: expands into a compare-exchange loop around
; __atomic_compare_exchange_1. Signed compare via sextb16/cmplt16; movt32
; keeps the loaded value when it is the larger.
; a3 = success order (5), l3 (stored to the stack slot) = failure order (5).
define i8 @atomicrmw_max_i8_seq_cst(i8 *%a, i8 %b) nounwind {
; CSKY-LABEL: atomicrmw_max_i8_seq_cst:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 20
; CSKY-NEXT: st16.w l3, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: mov16 l0, a1
; CSKY-NEXT: mov16 l1, a0
; CSKY-NEXT: ld16.b a0, (a0, 0)
; CSKY-NEXT: sextb16 l2, a1
; CSKY-NEXT: movi16 l3, 5
; CSKY-NEXT: .LBB39_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: sextb16 a1, a0
; CSKY-NEXT: cmplt16 l2, a1
; CSKY-NEXT: mov16 a2, l0
; CSKY-NEXT: movt32 a2, a0
; CSKY-NEXT: st32.b a0, (sp, 7)
; CSKY-NEXT: st16.w l3, (sp, 0)
; CSKY-NEXT: mov16 a0, l1
; CSKY-NEXT: addi32 a1, sp, 7
; CSKY-NEXT: movi16 a3, 5
; CSKY-NEXT: jsri32 [.LCPI39_0]
; CSKY-NEXT: mov16 a1, a0
; CSKY-NEXT: ld32.b a0, (sp, 7)
; CSKY-NEXT: bez32 a1, .LBB39_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 20
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI39_0:
; CSKY-NEXT: .long __atomic_compare_exchange_1
%1 = atomicrmw max i8* %a, i8 %b seq_cst
ret i8 %1
}
; i8 atomicrmw min, monotonic: expands into a compare-exchange loop around
; __atomic_compare_exchange_1. Signed compare via sextb16/cmplt16; movf32
; (move-if-false, vs. movt32 for max) keeps the loaded value when it is not
; greater. a3 = success order (0), l3 (stacked) = failure order (0).
define i8 @atomicrmw_min_i8_monotonic(i8 *%a, i8 %b) nounwind {
; CSKY-LABEL: atomicrmw_min_i8_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 20
; CSKY-NEXT: st16.w l3, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: mov16 l0, a1
; CSKY-NEXT: mov16 l1, a0
; CSKY-NEXT: ld16.b a0, (a0, 0)
; CSKY-NEXT: sextb16 l2, a1
; CSKY-NEXT: movi16 l3, 0
; CSKY-NEXT: .LBB40_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: sextb16 a1, a0
; CSKY-NEXT: cmplt16 l2, a1
; CSKY-NEXT: mov16 a2, l0
; CSKY-NEXT: movf32 a2, a0
; CSKY-NEXT: st32.b a0, (sp, 7)
; CSKY-NEXT: st16.w l3, (sp, 0)
; CSKY-NEXT: mov16 a0, l1
; CSKY-NEXT: addi32 a1, sp, 7
; CSKY-NEXT: movi16 a3, 0
; CSKY-NEXT: jsri32 [.LCPI40_0]
; CSKY-NEXT: mov16 a1, a0
; CSKY-NEXT: ld32.b a0, (sp, 7)
; CSKY-NEXT: bez32 a1, .LBB40_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 20
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI40_0:
; CSKY-NEXT: .long __atomic_compare_exchange_1
%1 = atomicrmw min i8* %a, i8 %b monotonic
ret i8 %1
}
; i8 atomicrmw min, acquire: expands into a compare-exchange loop around
; __atomic_compare_exchange_1. Signed compare via sextb16/cmplt16; movf32
; keeps the loaded value when it is not greater.
; a3 = success order (2), l3 (stacked) = failure order (2).
define i8 @atomicrmw_min_i8_acquire(i8 *%a, i8 %b) nounwind {
; CSKY-LABEL: atomicrmw_min_i8_acquire:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 20
; CSKY-NEXT: st16.w l3, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: mov16 l0, a1
; CSKY-NEXT: mov16 l1, a0
; CSKY-NEXT: ld16.b a0, (a0, 0)
; CSKY-NEXT: sextb16 l2, a1
; CSKY-NEXT: movi16 l3, 2
; CSKY-NEXT: .LBB41_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: sextb16 a1, a0
; CSKY-NEXT: cmplt16 l2, a1
; CSKY-NEXT: mov16 a2, l0
; CSKY-NEXT: movf32 a2, a0
; CSKY-NEXT: st32.b a0, (sp, 7)
; CSKY-NEXT: st16.w l3, (sp, 0)
; CSKY-NEXT: mov16 a0, l1
; CSKY-NEXT: addi32 a1, sp, 7
; CSKY-NEXT: movi16 a3, 2
; CSKY-NEXT: jsri32 [.LCPI41_0]
; CSKY-NEXT: mov16 a1, a0
; CSKY-NEXT: ld32.b a0, (sp, 7)
; CSKY-NEXT: bez32 a1, .LBB41_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 20
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI41_0:
; CSKY-NEXT: .long __atomic_compare_exchange_1
%1 = atomicrmw min i8* %a, i8 %b acquire
ret i8 %1
}
; i8 atomicrmw min, release: expands into a compare-exchange loop around
; __atomic_compare_exchange_1. Signed compare via sextb16/cmplt16; movf32
; keeps the loaded value when it is not greater. a3 = success order (3);
; l3 = failure order (0) — failure ordering cannot be release.
define i8 @atomicrmw_min_i8_release(i8 *%a, i8 %b) nounwind {
; CSKY-LABEL: atomicrmw_min_i8_release:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 20
; CSKY-NEXT: st16.w l3, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: mov16 l0, a1
; CSKY-NEXT: mov16 l1, a0
; CSKY-NEXT: ld16.b a0, (a0, 0)
; CSKY-NEXT: sextb16 l2, a1
; CSKY-NEXT: movi16 l3, 0
; CSKY-NEXT: .LBB42_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: sextb16 a1, a0
; CSKY-NEXT: cmplt16 l2, a1
; CSKY-NEXT: mov16 a2, l0
; CSKY-NEXT: movf32 a2, a0
; CSKY-NEXT: st32.b a0, (sp, 7)
; CSKY-NEXT: st16.w l3, (sp, 0)
; CSKY-NEXT: mov16 a0, l1
; CSKY-NEXT: addi32 a1, sp, 7
; CSKY-NEXT: movi16 a3, 3
; CSKY-NEXT: jsri32 [.LCPI42_0]
; CSKY-NEXT: mov16 a1, a0
; CSKY-NEXT: ld32.b a0, (sp, 7)
; CSKY-NEXT: bez32 a1, .LBB42_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 20
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI42_0:
; CSKY-NEXT: .long __atomic_compare_exchange_1
%1 = atomicrmw min i8* %a, i8 %b release
ret i8 %1
}
; "atomicrmw min i8 acq_rel": CAS loop around __atomic_compare_exchange_1 with
; success order a3=4 and failure order l3=2; min(old, %b) via cmplt16 + movf32.
define i8 @atomicrmw_min_i8_acq_rel(i8 *%a, i8 %b) nounwind {
; CSKY-LABEL: atomicrmw_min_i8_acq_rel:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 20
; CSKY-NEXT: st16.w l3, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: mov16 l0, a1
; CSKY-NEXT: mov16 l1, a0
; CSKY-NEXT: ld16.b a0, (a0, 0)
; CSKY-NEXT: sextb16 l2, a1
; CSKY-NEXT: movi16 l3, 2
; CSKY-NEXT: .LBB43_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: sextb16 a1, a0
; CSKY-NEXT: cmplt16 l2, a1
; CSKY-NEXT: mov16 a2, l0
; CSKY-NEXT: movf32 a2, a0
; CSKY-NEXT: st32.b a0, (sp, 7)
; CSKY-NEXT: st16.w l3, (sp, 0)
; CSKY-NEXT: mov16 a0, l1
; CSKY-NEXT: addi32 a1, sp, 7
; CSKY-NEXT: movi16 a3, 4
; CSKY-NEXT: jsri32 [.LCPI43_0]
; CSKY-NEXT: mov16 a1, a0
; CSKY-NEXT: ld32.b a0, (sp, 7)
; CSKY-NEXT: bez32 a1, .LBB43_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 20
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI43_0:
; CSKY-NEXT: .long __atomic_compare_exchange_1
  %1 = atomicrmw min i8* %a, i8 %b acq_rel
  ret i8 %1
}
; "atomicrmw min i8 seq_cst": CAS loop around __atomic_compare_exchange_1 with
; success order a3=5 and failure order l3=5; min(old, %b) via cmplt16 + movf32.
define i8 @atomicrmw_min_i8_seq_cst(i8 *%a, i8 %b) nounwind {
; CSKY-LABEL: atomicrmw_min_i8_seq_cst:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 20
; CSKY-NEXT: st16.w l3, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: mov16 l0, a1
; CSKY-NEXT: mov16 l1, a0
; CSKY-NEXT: ld16.b a0, (a0, 0)
; CSKY-NEXT: sextb16 l2, a1
; CSKY-NEXT: movi16 l3, 5
; CSKY-NEXT: .LBB44_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: sextb16 a1, a0
; CSKY-NEXT: cmplt16 l2, a1
; CSKY-NEXT: mov16 a2, l0
; CSKY-NEXT: movf32 a2, a0
; CSKY-NEXT: st32.b a0, (sp, 7)
; CSKY-NEXT: st16.w l3, (sp, 0)
; CSKY-NEXT: mov16 a0, l1
; CSKY-NEXT: addi32 a1, sp, 7
; CSKY-NEXT: movi16 a3, 5
; CSKY-NEXT: jsri32 [.LCPI44_0]
; CSKY-NEXT: mov16 a1, a0
; CSKY-NEXT: ld32.b a0, (sp, 7)
; CSKY-NEXT: bez32 a1, .LBB44_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 20
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI44_0:
; CSKY-NEXT: .long __atomic_compare_exchange_1
  %1 = atomicrmw min i8* %a, i8 %b seq_cst
  ret i8 %1
}
; "atomicrmw umax i8 monotonic": CAS loop around __atomic_compare_exchange_1
; (success a3=0, failure l3=0); unsigned max(old, %b) via zextb16 + cmphs16 + movf32.
define i8 @atomicrmw_umax_i8_monotonic(i8 *%a, i8 %b) nounwind {
; CSKY-LABEL: atomicrmw_umax_i8_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 20
; CSKY-NEXT: st16.w l3, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: mov16 l0, a1
; CSKY-NEXT: mov16 l1, a0
; CSKY-NEXT: ld16.b a0, (a0, 0)
; CSKY-NEXT: zextb16 l2, a1
; CSKY-NEXT: movi16 l3, 0
; CSKY-NEXT: .LBB45_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: zextb16 a1, a0
; CSKY-NEXT: cmphs16 l2, a1
; CSKY-NEXT: mov16 a2, l0
; CSKY-NEXT: movf32 a2, a0
; CSKY-NEXT: st32.b a0, (sp, 7)
; CSKY-NEXT: st16.w l3, (sp, 0)
; CSKY-NEXT: mov16 a0, l1
; CSKY-NEXT: addi32 a1, sp, 7
; CSKY-NEXT: movi16 a3, 0
; CSKY-NEXT: jsri32 [.LCPI45_0]
; CSKY-NEXT: mov16 a1, a0
; CSKY-NEXT: ld32.b a0, (sp, 7)
; CSKY-NEXT: bez32 a1, .LBB45_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 20
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI45_0:
; CSKY-NEXT: .long __atomic_compare_exchange_1
  %1 = atomicrmw umax i8* %a, i8 %b monotonic
  ret i8 %1
}
; "atomicrmw umax i8 acquire": CAS loop around __atomic_compare_exchange_1
; (success a3=2, failure l3=2); unsigned max(old, %b) via zextb16 + cmphs16 + movf32.
define i8 @atomicrmw_umax_i8_acquire(i8 *%a, i8 %b) nounwind {
; CSKY-LABEL: atomicrmw_umax_i8_acquire:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 20
; CSKY-NEXT: st16.w l3, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: mov16 l0, a1
; CSKY-NEXT: mov16 l1, a0
; CSKY-NEXT: ld16.b a0, (a0, 0)
; CSKY-NEXT: zextb16 l2, a1
; CSKY-NEXT: movi16 l3, 2
; CSKY-NEXT: .LBB46_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: zextb16 a1, a0
; CSKY-NEXT: cmphs16 l2, a1
; CSKY-NEXT: mov16 a2, l0
; CSKY-NEXT: movf32 a2, a0
; CSKY-NEXT: st32.b a0, (sp, 7)
; CSKY-NEXT: st16.w l3, (sp, 0)
; CSKY-NEXT: mov16 a0, l1
; CSKY-NEXT: addi32 a1, sp, 7
; CSKY-NEXT: movi16 a3, 2
; CSKY-NEXT: jsri32 [.LCPI46_0]
; CSKY-NEXT: mov16 a1, a0
; CSKY-NEXT: ld32.b a0, (sp, 7)
; CSKY-NEXT: bez32 a1, .LBB46_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 20
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI46_0:
; CSKY-NEXT: .long __atomic_compare_exchange_1
  %1 = atomicrmw umax i8* %a, i8 %b acquire
  ret i8 %1
}
; "atomicrmw umax i8 release": CAS loop around __atomic_compare_exchange_1
; (success a3=3, failure l3=0); unsigned max(old, %b) via zextb16 + cmphs16 + movf32.
define i8 @atomicrmw_umax_i8_release(i8 *%a, i8 %b) nounwind {
; CSKY-LABEL: atomicrmw_umax_i8_release:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 20
; CSKY-NEXT: st16.w l3, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: mov16 l0, a1
; CSKY-NEXT: mov16 l1, a0
; CSKY-NEXT: ld16.b a0, (a0, 0)
; CSKY-NEXT: zextb16 l2, a1
; CSKY-NEXT: movi16 l3, 0
; CSKY-NEXT: .LBB47_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: zextb16 a1, a0
; CSKY-NEXT: cmphs16 l2, a1
; CSKY-NEXT: mov16 a2, l0
; CSKY-NEXT: movf32 a2, a0
; CSKY-NEXT: st32.b a0, (sp, 7)
; CSKY-NEXT: st16.w l3, (sp, 0)
; CSKY-NEXT: mov16 a0, l1
; CSKY-NEXT: addi32 a1, sp, 7
; CSKY-NEXT: movi16 a3, 3
; CSKY-NEXT: jsri32 [.LCPI47_0]
; CSKY-NEXT: mov16 a1, a0
; CSKY-NEXT: ld32.b a0, (sp, 7)
; CSKY-NEXT: bez32 a1, .LBB47_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 20
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI47_0:
; CSKY-NEXT: .long __atomic_compare_exchange_1
  %1 = atomicrmw umax i8* %a, i8 %b release
  ret i8 %1
}
; "atomicrmw umax i8 acq_rel": CAS loop around __atomic_compare_exchange_1
; (success a3=4, failure l3=2); unsigned max(old, %b) via zextb16 + cmphs16 + movf32.
define i8 @atomicrmw_umax_i8_acq_rel(i8 *%a, i8 %b) nounwind {
; CSKY-LABEL: atomicrmw_umax_i8_acq_rel:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 20
; CSKY-NEXT: st16.w l3, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: mov16 l0, a1
; CSKY-NEXT: mov16 l1, a0
; CSKY-NEXT: ld16.b a0, (a0, 0)
; CSKY-NEXT: zextb16 l2, a1
; CSKY-NEXT: movi16 l3, 2
; CSKY-NEXT: .LBB48_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: zextb16 a1, a0
; CSKY-NEXT: cmphs16 l2, a1
; CSKY-NEXT: mov16 a2, l0
; CSKY-NEXT: movf32 a2, a0
; CSKY-NEXT: st32.b a0, (sp, 7)
; CSKY-NEXT: st16.w l3, (sp, 0)
; CSKY-NEXT: mov16 a0, l1
; CSKY-NEXT: addi32 a1, sp, 7
; CSKY-NEXT: movi16 a3, 4
; CSKY-NEXT: jsri32 [.LCPI48_0]
; CSKY-NEXT: mov16 a1, a0
; CSKY-NEXT: ld32.b a0, (sp, 7)
; CSKY-NEXT: bez32 a1, .LBB48_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 20
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI48_0:
; CSKY-NEXT: .long __atomic_compare_exchange_1
  %1 = atomicrmw umax i8* %a, i8 %b acq_rel
  ret i8 %1
}
; "atomicrmw umax i8 seq_cst": CAS loop around __atomic_compare_exchange_1
; (success a3=5, failure l3=5); unsigned max(old, %b) via zextb16 + cmphs16 + movf32.
define i8 @atomicrmw_umax_i8_seq_cst(i8 *%a, i8 %b) nounwind {
; CSKY-LABEL: atomicrmw_umax_i8_seq_cst:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 20
; CSKY-NEXT: st16.w l3, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: mov16 l0, a1
; CSKY-NEXT: mov16 l1, a0
; CSKY-NEXT: ld16.b a0, (a0, 0)
; CSKY-NEXT: zextb16 l2, a1
; CSKY-NEXT: movi16 l3, 5
; CSKY-NEXT: .LBB49_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: zextb16 a1, a0
; CSKY-NEXT: cmphs16 l2, a1
; CSKY-NEXT: mov16 a2, l0
; CSKY-NEXT: movf32 a2, a0
; CSKY-NEXT: st32.b a0, (sp, 7)
; CSKY-NEXT: st16.w l3, (sp, 0)
; CSKY-NEXT: mov16 a0, l1
; CSKY-NEXT: addi32 a1, sp, 7
; CSKY-NEXT: movi16 a3, 5
; CSKY-NEXT: jsri32 [.LCPI49_0]
; CSKY-NEXT: mov16 a1, a0
; CSKY-NEXT: ld32.b a0, (sp, 7)
; CSKY-NEXT: bez32 a1, .LBB49_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 20
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI49_0:
; CSKY-NEXT: .long __atomic_compare_exchange_1
  %1 = atomicrmw umax i8* %a, i8 %b seq_cst
  ret i8 %1
}
; "atomicrmw umin i8 monotonic": CAS loop around __atomic_compare_exchange_1
; (success a3=0, failure l3=0); unsigned min(old, %b) via zextb16 + cmphs16 + movt32
; (movt32 selects the old value when %b >= old, i.e. keeps the smaller one).
define i8 @atomicrmw_umin_i8_monotonic(i8 *%a, i8 %b) nounwind {
; CSKY-LABEL: atomicrmw_umin_i8_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 20
; CSKY-NEXT: st16.w l3, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: mov16 l0, a1
; CSKY-NEXT: mov16 l1, a0
; CSKY-NEXT: ld16.b a0, (a0, 0)
; CSKY-NEXT: zextb16 l2, a1
; CSKY-NEXT: movi16 l3, 0
; CSKY-NEXT: .LBB50_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: zextb16 a1, a0
; CSKY-NEXT: cmphs16 l2, a1
; CSKY-NEXT: mov16 a2, l0
; CSKY-NEXT: movt32 a2, a0
; CSKY-NEXT: st32.b a0, (sp, 7)
; CSKY-NEXT: st16.w l3, (sp, 0)
; CSKY-NEXT: mov16 a0, l1
; CSKY-NEXT: addi32 a1, sp, 7
; CSKY-NEXT: movi16 a3, 0
; CSKY-NEXT: jsri32 [.LCPI50_0]
; CSKY-NEXT: mov16 a1, a0
; CSKY-NEXT: ld32.b a0, (sp, 7)
; CSKY-NEXT: bez32 a1, .LBB50_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 20
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI50_0:
; CSKY-NEXT: .long __atomic_compare_exchange_1
  %1 = atomicrmw umin i8* %a, i8 %b monotonic
  ret i8 %1
}
; "atomicrmw umin i8 acquire": CAS loop around __atomic_compare_exchange_1
; (success a3=2, failure l3=2); unsigned min(old, %b) via zextb16 + cmphs16 + movt32.
define i8 @atomicrmw_umin_i8_acquire(i8 *%a, i8 %b) nounwind {
; CSKY-LABEL: atomicrmw_umin_i8_acquire:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 20
; CSKY-NEXT: st16.w l3, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: mov16 l0, a1
; CSKY-NEXT: mov16 l1, a0
; CSKY-NEXT: ld16.b a0, (a0, 0)
; CSKY-NEXT: zextb16 l2, a1
; CSKY-NEXT: movi16 l3, 2
; CSKY-NEXT: .LBB51_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: zextb16 a1, a0
; CSKY-NEXT: cmphs16 l2, a1
; CSKY-NEXT: mov16 a2, l0
; CSKY-NEXT: movt32 a2, a0
; CSKY-NEXT: st32.b a0, (sp, 7)
; CSKY-NEXT: st16.w l3, (sp, 0)
; CSKY-NEXT: mov16 a0, l1
; CSKY-NEXT: addi32 a1, sp, 7
; CSKY-NEXT: movi16 a3, 2
; CSKY-NEXT: jsri32 [.LCPI51_0]
; CSKY-NEXT: mov16 a1, a0
; CSKY-NEXT: ld32.b a0, (sp, 7)
; CSKY-NEXT: bez32 a1, .LBB51_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 20
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI51_0:
; CSKY-NEXT: .long __atomic_compare_exchange_1
  %1 = atomicrmw umin i8* %a, i8 %b acquire
  ret i8 %1
}
; "atomicrmw umin i8 release": CAS loop around __atomic_compare_exchange_1
; (success a3=3, failure l3=0); unsigned min(old, %b) via zextb16 + cmphs16 + movt32.
define i8 @atomicrmw_umin_i8_release(i8 *%a, i8 %b) nounwind {
; CSKY-LABEL: atomicrmw_umin_i8_release:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 20
; CSKY-NEXT: st16.w l3, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: mov16 l0, a1
; CSKY-NEXT: mov16 l1, a0
; CSKY-NEXT: ld16.b a0, (a0, 0)
; CSKY-NEXT: zextb16 l2, a1
; CSKY-NEXT: movi16 l3, 0
; CSKY-NEXT: .LBB52_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: zextb16 a1, a0
; CSKY-NEXT: cmphs16 l2, a1
; CSKY-NEXT: mov16 a2, l0
; CSKY-NEXT: movt32 a2, a0
; CSKY-NEXT: st32.b a0, (sp, 7)
; CSKY-NEXT: st16.w l3, (sp, 0)
; CSKY-NEXT: mov16 a0, l1
; CSKY-NEXT: addi32 a1, sp, 7
; CSKY-NEXT: movi16 a3, 3
; CSKY-NEXT: jsri32 [.LCPI52_0]
; CSKY-NEXT: mov16 a1, a0
; CSKY-NEXT: ld32.b a0, (sp, 7)
; CSKY-NEXT: bez32 a1, .LBB52_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 20
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI52_0:
; CSKY-NEXT: .long __atomic_compare_exchange_1
  %1 = atomicrmw umin i8* %a, i8 %b release
  ret i8 %1
}
; "atomicrmw umin i8 acq_rel": CAS loop around __atomic_compare_exchange_1
; (success a3=4, failure l3=2); unsigned min(old, %b) via zextb16 + cmphs16 + movt32.
define i8 @atomicrmw_umin_i8_acq_rel(i8 *%a, i8 %b) nounwind {
; CSKY-LABEL: atomicrmw_umin_i8_acq_rel:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 20
; CSKY-NEXT: st16.w l3, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: mov16 l0, a1
; CSKY-NEXT: mov16 l1, a0
; CSKY-NEXT: ld16.b a0, (a0, 0)
; CSKY-NEXT: zextb16 l2, a1
; CSKY-NEXT: movi16 l3, 2
; CSKY-NEXT: .LBB53_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: zextb16 a1, a0
; CSKY-NEXT: cmphs16 l2, a1
; CSKY-NEXT: mov16 a2, l0
; CSKY-NEXT: movt32 a2, a0
; CSKY-NEXT: st32.b a0, (sp, 7)
; CSKY-NEXT: st16.w l3, (sp, 0)
; CSKY-NEXT: mov16 a0, l1
; CSKY-NEXT: addi32 a1, sp, 7
; CSKY-NEXT: movi16 a3, 4
; CSKY-NEXT: jsri32 [.LCPI53_0]
; CSKY-NEXT: mov16 a1, a0
; CSKY-NEXT: ld32.b a0, (sp, 7)
; CSKY-NEXT: bez32 a1, .LBB53_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 20
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI53_0:
; CSKY-NEXT: .long __atomic_compare_exchange_1
  %1 = atomicrmw umin i8* %a, i8 %b acq_rel
  ret i8 %1
}
; "atomicrmw umin i8 seq_cst": CAS loop around __atomic_compare_exchange_1
; (success a3=5, failure l3=5); unsigned min(old, %b) via zextb16 + cmphs16 + movt32.
define i8 @atomicrmw_umin_i8_seq_cst(i8 *%a, i8 %b) nounwind {
; CSKY-LABEL: atomicrmw_umin_i8_seq_cst:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 20
; CSKY-NEXT: st16.w l3, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: mov16 l0, a1
; CSKY-NEXT: mov16 l1, a0
; CSKY-NEXT: ld16.b a0, (a0, 0)
; CSKY-NEXT: zextb16 l2, a1
; CSKY-NEXT: movi16 l3, 5
; CSKY-NEXT: .LBB54_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: zextb16 a1, a0
; CSKY-NEXT: cmphs16 l2, a1
; CSKY-NEXT: mov16 a2, l0
; CSKY-NEXT: movt32 a2, a0
; CSKY-NEXT: st32.b a0, (sp, 7)
; CSKY-NEXT: st16.w l3, (sp, 0)
; CSKY-NEXT: mov16 a0, l1
; CSKY-NEXT: addi32 a1, sp, 7
; CSKY-NEXT: movi16 a3, 5
; CSKY-NEXT: jsri32 [.LCPI54_0]
; CSKY-NEXT: mov16 a1, a0
; CSKY-NEXT: ld32.b a0, (sp, 7)
; CSKY-NEXT: bez32 a1, .LBB54_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 20
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI54_0:
; CSKY-NEXT: .long __atomic_compare_exchange_1
  %1 = atomicrmw umin i8* %a, i8 %b seq_cst
  ret i8 %1
}
; "atomicrmw xchg i16 monotonic" lowers to the __atomic_exchange_2 libcall with
; memory-order argument a2=0 (relaxed).
define i16 @atomicrmw_xchg_i16_monotonic(i16* %a, i16 %b) nounwind {
; CSKY-LABEL: atomicrmw_xchg_i16_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 0
; CSKY-NEXT: jsri32 [.LCPI55_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI55_0:
; CSKY-NEXT: .long __atomic_exchange_2
  %1 = atomicrmw xchg i16* %a, i16 %b monotonic
  ret i16 %1
}
; "atomicrmw xchg i16 acquire" lowers to the __atomic_exchange_2 libcall with
; memory-order argument a2=2 (acquire).
define i16 @atomicrmw_xchg_i16_acquire(i16* %a, i16 %b) nounwind {
; CSKY-LABEL: atomicrmw_xchg_i16_acquire:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 2
; CSKY-NEXT: jsri32 [.LCPI56_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI56_0:
; CSKY-NEXT: .long __atomic_exchange_2
  %1 = atomicrmw xchg i16* %a, i16 %b acquire
  ret i16 %1
}
; "atomicrmw xchg i16 release" lowers to the __atomic_exchange_2 libcall with
; memory-order argument a2=3 (release).
define i16 @atomicrmw_xchg_i16_release(i16* %a, i16 %b) nounwind {
; CSKY-LABEL: atomicrmw_xchg_i16_release:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 3
; CSKY-NEXT: jsri32 [.LCPI57_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI57_0:
; CSKY-NEXT: .long __atomic_exchange_2
  %1 = atomicrmw xchg i16* %a, i16 %b release
  ret i16 %1
}
; "atomicrmw xchg i16 acq_rel" lowers to the __atomic_exchange_2 libcall with
; memory-order argument a2=4 (acq_rel).
define i16 @atomicrmw_xchg_i16_acq_rel(i16* %a, i16 %b) nounwind {
; CSKY-LABEL: atomicrmw_xchg_i16_acq_rel:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 4
; CSKY-NEXT: jsri32 [.LCPI58_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI58_0:
; CSKY-NEXT: .long __atomic_exchange_2
  %1 = atomicrmw xchg i16* %a, i16 %b acq_rel
  ret i16 %1
}
; "atomicrmw xchg i16 seq_cst" lowers to the __atomic_exchange_2 libcall with
; memory-order argument a2=5 (seq_cst).
define i16 @atomicrmw_xchg_i16_seq_cst(i16* %a, i16 %b) nounwind {
; CSKY-LABEL: atomicrmw_xchg_i16_seq_cst:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 5
; CSKY-NEXT: jsri32 [.LCPI59_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI59_0:
; CSKY-NEXT: .long __atomic_exchange_2
  %1 = atomicrmw xchg i16* %a, i16 %b seq_cst
  ret i16 %1
}
; "atomicrmw add i16 monotonic" lowers to the __atomic_fetch_add_2 libcall with
; memory-order argument a2=0 (relaxed).
define i16 @atomicrmw_add_i16_monotonic(i16 *%a, i16 %b) nounwind {
; CSKY-LABEL: atomicrmw_add_i16_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 0
; CSKY-NEXT: jsri32 [.LCPI60_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI60_0:
; CSKY-NEXT: .long __atomic_fetch_add_2
  %1 = atomicrmw add i16* %a, i16 %b monotonic
  ret i16 %1
}
; "atomicrmw add i16 acquire" lowers to the __atomic_fetch_add_2 libcall with
; memory-order argument a2=2 (acquire).
define i16 @atomicrmw_add_i16_acquire(i16 *%a, i16 %b) nounwind {
; CSKY-LABEL: atomicrmw_add_i16_acquire:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 2
; CSKY-NEXT: jsri32 [.LCPI61_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI61_0:
; CSKY-NEXT: .long __atomic_fetch_add_2
  %1 = atomicrmw add i16* %a, i16 %b acquire
  ret i16 %1
}
; "atomicrmw add i16 release" lowers to the __atomic_fetch_add_2 libcall with
; memory-order argument a2=3 (release).
define i16 @atomicrmw_add_i16_release(i16 *%a, i16 %b) nounwind {
; CSKY-LABEL: atomicrmw_add_i16_release:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 3
; CSKY-NEXT: jsri32 [.LCPI62_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI62_0:
; CSKY-NEXT: .long __atomic_fetch_add_2
  %1 = atomicrmw add i16* %a, i16 %b release
  ret i16 %1
}
; "atomicrmw add i16 acq_rel" lowers to the __atomic_fetch_add_2 libcall with
; memory-order argument a2=4 (acq_rel).
define i16 @atomicrmw_add_i16_acq_rel(i16 *%a, i16 %b) nounwind {
; CSKY-LABEL: atomicrmw_add_i16_acq_rel:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 4
; CSKY-NEXT: jsri32 [.LCPI63_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI63_0:
; CSKY-NEXT: .long __atomic_fetch_add_2
  %1 = atomicrmw add i16* %a, i16 %b acq_rel
  ret i16 %1
}
; "atomicrmw add i16 seq_cst" lowers to the __atomic_fetch_add_2 libcall with
; memory-order argument a2=5 (seq_cst).
define i16 @atomicrmw_add_i16_seq_cst(i16 *%a, i16 %b) nounwind {
; CSKY-LABEL: atomicrmw_add_i16_seq_cst:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 5
; CSKY-NEXT: jsri32 [.LCPI64_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI64_0:
; CSKY-NEXT: .long __atomic_fetch_add_2
  %1 = atomicrmw add i16* %a, i16 %b seq_cst
  ret i16 %1
}
; "atomicrmw sub i16 monotonic" lowers to the __atomic_fetch_sub_2 libcall with
; memory-order argument a2=0 (relaxed).
define i16 @atomicrmw_sub_i16_monotonic(i16* %a, i16 %b) nounwind {
; CSKY-LABEL: atomicrmw_sub_i16_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 0
; CSKY-NEXT: jsri32 [.LCPI65_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI65_0:
; CSKY-NEXT: .long __atomic_fetch_sub_2
  %1 = atomicrmw sub i16* %a, i16 %b monotonic
  ret i16 %1
}
; "atomicrmw sub i16 acquire" lowers to the __atomic_fetch_sub_2 libcall with
; memory-order argument a2=2 (acquire).
define i16 @atomicrmw_sub_i16_acquire(i16* %a, i16 %b) nounwind {
; CSKY-LABEL: atomicrmw_sub_i16_acquire:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 2
; CSKY-NEXT: jsri32 [.LCPI66_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI66_0:
; CSKY-NEXT: .long __atomic_fetch_sub_2
  %1 = atomicrmw sub i16* %a, i16 %b acquire
  ret i16 %1
}
; "atomicrmw sub i16 release" lowers to the __atomic_fetch_sub_2 libcall with
; memory-order argument a2=3 (release).
define i16 @atomicrmw_sub_i16_release(i16* %a, i16 %b) nounwind {
; CSKY-LABEL: atomicrmw_sub_i16_release:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 3
; CSKY-NEXT: jsri32 [.LCPI67_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI67_0:
; CSKY-NEXT: .long __atomic_fetch_sub_2
  %1 = atomicrmw sub i16* %a, i16 %b release
  ret i16 %1
}
; "atomicrmw sub i16 acq_rel" lowers to the __atomic_fetch_sub_2 libcall with
; memory-order argument a2=4 (acq_rel).
define i16 @atomicrmw_sub_i16_acq_rel(i16* %a, i16 %b) nounwind {
; CSKY-LABEL: atomicrmw_sub_i16_acq_rel:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 4
; CSKY-NEXT: jsri32 [.LCPI68_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI68_0:
; CSKY-NEXT: .long __atomic_fetch_sub_2
  %1 = atomicrmw sub i16* %a, i16 %b acq_rel
  ret i16 %1
}
; "atomicrmw sub i16 seq_cst" lowers to the __atomic_fetch_sub_2 libcall with
; memory-order argument a2=5 (seq_cst).
define i16 @atomicrmw_sub_i16_seq_cst(i16* %a, i16 %b) nounwind {
; CSKY-LABEL: atomicrmw_sub_i16_seq_cst:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 5
; CSKY-NEXT: jsri32 [.LCPI69_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI69_0:
; CSKY-NEXT: .long __atomic_fetch_sub_2
  %1 = atomicrmw sub i16* %a, i16 %b seq_cst
  ret i16 %1
}
; "atomicrmw and i16 monotonic" lowers to the __atomic_fetch_and_2 libcall with
; memory-order argument a2=0 (relaxed).
define i16 @atomicrmw_and_i16_monotonic(i16 *%a, i16 %b) nounwind {
; CSKY-LABEL: atomicrmw_and_i16_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 0
; CSKY-NEXT: jsri32 [.LCPI70_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI70_0:
; CSKY-NEXT: .long __atomic_fetch_and_2
  %1 = atomicrmw and i16* %a, i16 %b monotonic
  ret i16 %1
}
; "atomicrmw and i16 acquire" lowers to the __atomic_fetch_and_2 libcall with
; memory-order argument a2=2 (acquire).
define i16 @atomicrmw_and_i16_acquire(i16 *%a, i16 %b) nounwind {
; CSKY-LABEL: atomicrmw_and_i16_acquire:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 2
; CSKY-NEXT: jsri32 [.LCPI71_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI71_0:
; CSKY-NEXT: .long __atomic_fetch_and_2
  %1 = atomicrmw and i16* %a, i16 %b acquire
  ret i16 %1
}
; "atomicrmw and i16 release" lowers to the __atomic_fetch_and_2 libcall with
; memory-order argument a2=3 (release).
define i16 @atomicrmw_and_i16_release(i16 *%a, i16 %b) nounwind {
; CSKY-LABEL: atomicrmw_and_i16_release:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 3
; CSKY-NEXT: jsri32 [.LCPI72_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI72_0:
; CSKY-NEXT: .long __atomic_fetch_and_2
  %1 = atomicrmw and i16* %a, i16 %b release
  ret i16 %1
}
; "atomicrmw and i16 acq_rel" lowers to the __atomic_fetch_and_2 libcall with
; memory-order argument a2=4 (acq_rel).
define i16 @atomicrmw_and_i16_acq_rel(i16 *%a, i16 %b) nounwind {
; CSKY-LABEL: atomicrmw_and_i16_acq_rel:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 4
; CSKY-NEXT: jsri32 [.LCPI73_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI73_0:
; CSKY-NEXT: .long __atomic_fetch_and_2
  %1 = atomicrmw and i16* %a, i16 %b acq_rel
  ret i16 %1
}
; "atomicrmw and i16 seq_cst" lowers to the __atomic_fetch_and_2 libcall with
; memory-order argument a2=5 (seq_cst).
define i16 @atomicrmw_and_i16_seq_cst(i16 *%a, i16 %b) nounwind {
; CSKY-LABEL: atomicrmw_and_i16_seq_cst:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 5
; CSKY-NEXT: jsri32 [.LCPI74_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI74_0:
; CSKY-NEXT: .long __atomic_fetch_and_2
  %1 = atomicrmw and i16* %a, i16 %b seq_cst
  ret i16 %1
}
; "atomicrmw nand i16 monotonic" lowers to the __atomic_fetch_nand_2 libcall with
; memory-order argument a2=0 (relaxed).
define i16 @atomicrmw_nand_i16_monotonic(i16* %a, i16 %b) nounwind {
; CSKY-LABEL: atomicrmw_nand_i16_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 0
; CSKY-NEXT: jsri32 [.LCPI75_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI75_0:
; CSKY-NEXT: .long __atomic_fetch_nand_2
  %1 = atomicrmw nand i16* %a, i16 %b monotonic
  ret i16 %1
}
; i16 atomicrmw nand, acquire: lowers to a libcall to
; __atomic_fetch_nand_2 with the memory-order argument (a2) = 2 (acquire).
define i16 @atomicrmw_nand_i16_acquire(i16* %a, i16 %b) nounwind {
; CSKY-LABEL: atomicrmw_nand_i16_acquire:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 2
; CSKY-NEXT: jsri32 [.LCPI76_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI76_0:
; CSKY-NEXT: .long __atomic_fetch_nand_2
%1 = atomicrmw nand i16* %a, i16 %b acquire
ret i16 %1
}
; i16 atomicrmw nand, release: lowers to a libcall to
; __atomic_fetch_nand_2 with the memory-order argument (a2) = 3 (release).
define i16 @atomicrmw_nand_i16_release(i16* %a, i16 %b) nounwind {
; CSKY-LABEL: atomicrmw_nand_i16_release:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 3
; CSKY-NEXT: jsri32 [.LCPI77_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI77_0:
; CSKY-NEXT: .long __atomic_fetch_nand_2
%1 = atomicrmw nand i16* %a, i16 %b release
ret i16 %1
}
; i16 atomicrmw nand, acq_rel: lowers to a libcall to
; __atomic_fetch_nand_2 with the memory-order argument (a2) = 4 (acq_rel).
define i16 @atomicrmw_nand_i16_acq_rel(i16* %a, i16 %b) nounwind {
; CSKY-LABEL: atomicrmw_nand_i16_acq_rel:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 4
; CSKY-NEXT: jsri32 [.LCPI78_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI78_0:
; CSKY-NEXT: .long __atomic_fetch_nand_2
%1 = atomicrmw nand i16* %a, i16 %b acq_rel
ret i16 %1
}
; i16 atomicrmw nand, seq_cst: lowers to a libcall to
; __atomic_fetch_nand_2 with the memory-order argument (a2) = 5 (seq_cst).
define i16 @atomicrmw_nand_i16_seq_cst(i16* %a, i16 %b) nounwind {
; CSKY-LABEL: atomicrmw_nand_i16_seq_cst:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 5
; CSKY-NEXT: jsri32 [.LCPI79_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI79_0:
; CSKY-NEXT: .long __atomic_fetch_nand_2
%1 = atomicrmw nand i16* %a, i16 %b seq_cst
ret i16 %1
}
; i16 atomicrmw or, monotonic: lowers to a libcall to __atomic_fetch_or_2
; with the memory-order argument (a2) = 0 (relaxed).
define i16 @atomicrmw_or_i16_monotonic(i16 *%a, i16 %b) nounwind {
; CSKY-LABEL: atomicrmw_or_i16_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 0
; CSKY-NEXT: jsri32 [.LCPI80_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI80_0:
; CSKY-NEXT: .long __atomic_fetch_or_2
%1 = atomicrmw or i16* %a, i16 %b monotonic
ret i16 %1
}
; i16 atomicrmw or, acquire: lowers to a libcall to __atomic_fetch_or_2
; with the memory-order argument (a2) = 2 (acquire).
define i16 @atomicrmw_or_i16_acquire(i16 *%a, i16 %b) nounwind {
; CSKY-LABEL: atomicrmw_or_i16_acquire:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 2
; CSKY-NEXT: jsri32 [.LCPI81_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI81_0:
; CSKY-NEXT: .long __atomic_fetch_or_2
%1 = atomicrmw or i16* %a, i16 %b acquire
ret i16 %1
}
; i16 atomicrmw or, release: lowers to a libcall to __atomic_fetch_or_2
; with the memory-order argument (a2) = 3 (release).
define i16 @atomicrmw_or_i16_release(i16 *%a, i16 %b) nounwind {
; CSKY-LABEL: atomicrmw_or_i16_release:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 3
; CSKY-NEXT: jsri32 [.LCPI82_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI82_0:
; CSKY-NEXT: .long __atomic_fetch_or_2
%1 = atomicrmw or i16* %a, i16 %b release
ret i16 %1
}
; i16 atomicrmw or, acq_rel: lowers to a libcall to __atomic_fetch_or_2
; with the memory-order argument (a2) = 4 (acq_rel).
define i16 @atomicrmw_or_i16_acq_rel(i16 *%a, i16 %b) nounwind {
; CSKY-LABEL: atomicrmw_or_i16_acq_rel:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 4
; CSKY-NEXT: jsri32 [.LCPI83_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI83_0:
; CSKY-NEXT: .long __atomic_fetch_or_2
%1 = atomicrmw or i16* %a, i16 %b acq_rel
ret i16 %1
}
; i16 atomicrmw or, seq_cst: lowers to a libcall to __atomic_fetch_or_2
; with the memory-order argument (a2) = 5 (seq_cst).
define i16 @atomicrmw_or_i16_seq_cst(i16 *%a, i16 %b) nounwind {
; CSKY-LABEL: atomicrmw_or_i16_seq_cst:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 5
; CSKY-NEXT: jsri32 [.LCPI84_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI84_0:
; CSKY-NEXT: .long __atomic_fetch_or_2
%1 = atomicrmw or i16* %a, i16 %b seq_cst
ret i16 %1
}
; i16 atomicrmw xor, monotonic: lowers to a libcall to __atomic_fetch_xor_2
; with the memory-order argument (a2) = 0 (relaxed).
define i16 @atomicrmw_xor_i16_monotonic(i16 *%a, i16 %b) nounwind {
; CSKY-LABEL: atomicrmw_xor_i16_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 0
; CSKY-NEXT: jsri32 [.LCPI85_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI85_0:
; CSKY-NEXT: .long __atomic_fetch_xor_2
%1 = atomicrmw xor i16* %a, i16 %b monotonic
ret i16 %1
}
; i16 atomicrmw xor, acquire: lowers to a libcall to __atomic_fetch_xor_2
; with the memory-order argument (a2) = 2 (acquire).
define i16 @atomicrmw_xor_i16_acquire(i16 *%a, i16 %b) nounwind {
; CSKY-LABEL: atomicrmw_xor_i16_acquire:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 2
; CSKY-NEXT: jsri32 [.LCPI86_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI86_0:
; CSKY-NEXT: .long __atomic_fetch_xor_2
%1 = atomicrmw xor i16* %a, i16 %b acquire
ret i16 %1
}
; i16 atomicrmw xor, release: lowers to a libcall to __atomic_fetch_xor_2
; with the memory-order argument (a2) = 3 (release).
define i16 @atomicrmw_xor_i16_release(i16 *%a, i16 %b) nounwind {
; CSKY-LABEL: atomicrmw_xor_i16_release:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 3
; CSKY-NEXT: jsri32 [.LCPI87_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI87_0:
; CSKY-NEXT: .long __atomic_fetch_xor_2
%1 = atomicrmw xor i16* %a, i16 %b release
ret i16 %1
}
; i16 atomicrmw xor, acq_rel: lowers to a libcall to __atomic_fetch_xor_2
; with the memory-order argument (a2) = 4 (acq_rel).
define i16 @atomicrmw_xor_i16_acq_rel(i16 *%a, i16 %b) nounwind {
; CSKY-LABEL: atomicrmw_xor_i16_acq_rel:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 4
; CSKY-NEXT: jsri32 [.LCPI88_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI88_0:
; CSKY-NEXT: .long __atomic_fetch_xor_2
%1 = atomicrmw xor i16* %a, i16 %b acq_rel
ret i16 %1
}
; i16 atomicrmw xor, seq_cst: lowers to a libcall to __atomic_fetch_xor_2
; with the memory-order argument (a2) = 5 (seq_cst).
define i16 @atomicrmw_xor_i16_seq_cst(i16 *%a, i16 %b) nounwind {
; CSKY-LABEL: atomicrmw_xor_i16_seq_cst:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 5
; CSKY-NEXT: jsri32 [.LCPI89_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI89_0:
; CSKY-NEXT: .long __atomic_fetch_xor_2
%1 = atomicrmw xor i16* %a, i16 %b seq_cst
ret i16 %1
}
; i16 atomicrmw max, monotonic: no libatomic fetch-max, so this expands to
; a compare-exchange loop: sign-extended compare (sexth16 + cmplt16, with
; movt32 selecting the signed max) feeding __atomic_compare_exchange_2.
; Success order (a3) = 0 and failure order (stored to the stack via l3) = 0.
define i16 @atomicrmw_max_i16_monotonic(i16 *%a, i16 %b) nounwind {
; CSKY-LABEL: atomicrmw_max_i16_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 20
; CSKY-NEXT: st16.w l3, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: mov16 l0, a1
; CSKY-NEXT: mov16 l1, a0
; CSKY-NEXT: ld16.h a0, (a0, 0)
; CSKY-NEXT: sexth16 l2, a1
; CSKY-NEXT: movi16 l3, 0
; CSKY-NEXT: .LBB90_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: sexth16 a1, a0
; CSKY-NEXT: cmplt16 l2, a1
; CSKY-NEXT: mov16 a2, l0
; CSKY-NEXT: movt32 a2, a0
; CSKY-NEXT: st32.h a0, (sp, 6)
; CSKY-NEXT: st16.w l3, (sp, 0)
; CSKY-NEXT: mov16 a0, l1
; CSKY-NEXT: addi32 a1, sp, 6
; CSKY-NEXT: movi16 a3, 0
; CSKY-NEXT: jsri32 [.LCPI90_0]
; CSKY-NEXT: mov16 a1, a0
; CSKY-NEXT: ld32.h a0, (sp, 6)
; CSKY-NEXT: bez32 a1, .LBB90_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 20
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI90_0:
; CSKY-NEXT: .long __atomic_compare_exchange_2
%1 = atomicrmw max i16* %a, i16 %b monotonic
ret i16 %1
}
; i16 atomicrmw max, acquire: expands to a compare-exchange loop (signed
; compare via sexth16/cmplt16, movt32 selects the max) around
; __atomic_compare_exchange_2 with success order (a3) = 2 (acquire) and
; failure order (on the stack, from l3) = 2 (acquire).
define i16 @atomicrmw_max_i16_acquire(i16 *%a, i16 %b) nounwind {
; CSKY-LABEL: atomicrmw_max_i16_acquire:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 20
; CSKY-NEXT: st16.w l3, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: mov16 l0, a1
; CSKY-NEXT: mov16 l1, a0
; CSKY-NEXT: ld16.h a0, (a0, 0)
; CSKY-NEXT: sexth16 l2, a1
; CSKY-NEXT: movi16 l3, 2
; CSKY-NEXT: .LBB91_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: sexth16 a1, a0
; CSKY-NEXT: cmplt16 l2, a1
; CSKY-NEXT: mov16 a2, l0
; CSKY-NEXT: movt32 a2, a0
; CSKY-NEXT: st32.h a0, (sp, 6)
; CSKY-NEXT: st16.w l3, (sp, 0)
; CSKY-NEXT: mov16 a0, l1
; CSKY-NEXT: addi32 a1, sp, 6
; CSKY-NEXT: movi16 a3, 2
; CSKY-NEXT: jsri32 [.LCPI91_0]
; CSKY-NEXT: mov16 a1, a0
; CSKY-NEXT: ld32.h a0, (sp, 6)
; CSKY-NEXT: bez32 a1, .LBB91_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 20
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI91_0:
; CSKY-NEXT: .long __atomic_compare_exchange_2
%1 = atomicrmw max i16* %a, i16 %b acquire
ret i16 %1
}
; i16 atomicrmw max, release: compare-exchange loop with success order
; (a3) = 3 (release) and failure order (on the stack, from l3) = 0
; (monotonic) — the failure ordering of a CAS cannot be release, so it is
; legally weakened to relaxed.
define i16 @atomicrmw_max_i16_release(i16 *%a, i16 %b) nounwind {
; CSKY-LABEL: atomicrmw_max_i16_release:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 20
; CSKY-NEXT: st16.w l3, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: mov16 l0, a1
; CSKY-NEXT: mov16 l1, a0
; CSKY-NEXT: ld16.h a0, (a0, 0)
; CSKY-NEXT: sexth16 l2, a1
; CSKY-NEXT: movi16 l3, 0
; CSKY-NEXT: .LBB92_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: sexth16 a1, a0
; CSKY-NEXT: cmplt16 l2, a1
; CSKY-NEXT: mov16 a2, l0
; CSKY-NEXT: movt32 a2, a0
; CSKY-NEXT: st32.h a0, (sp, 6)
; CSKY-NEXT: st16.w l3, (sp, 0)
; CSKY-NEXT: mov16 a0, l1
; CSKY-NEXT: addi32 a1, sp, 6
; CSKY-NEXT: movi16 a3, 3
; CSKY-NEXT: jsri32 [.LCPI92_0]
; CSKY-NEXT: mov16 a1, a0
; CSKY-NEXT: ld32.h a0, (sp, 6)
; CSKY-NEXT: bez32 a1, .LBB92_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 20
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI92_0:
; CSKY-NEXT: .long __atomic_compare_exchange_2
%1 = atomicrmw max i16* %a, i16 %b release
ret i16 %1
}
; i16 atomicrmw max, acq_rel: compare-exchange loop with success order
; (a3) = 4 (acq_rel) and failure order (on the stack, from l3) = 2
; (acquire) — acq_rel is weakened to acquire for the failure path.
define i16 @atomicrmw_max_i16_acq_rel(i16 *%a, i16 %b) nounwind {
; CSKY-LABEL: atomicrmw_max_i16_acq_rel:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 20
; CSKY-NEXT: st16.w l3, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: mov16 l0, a1
; CSKY-NEXT: mov16 l1, a0
; CSKY-NEXT: ld16.h a0, (a0, 0)
; CSKY-NEXT: sexth16 l2, a1
; CSKY-NEXT: movi16 l3, 2
; CSKY-NEXT: .LBB93_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: sexth16 a1, a0
; CSKY-NEXT: cmplt16 l2, a1
; CSKY-NEXT: mov16 a2, l0
; CSKY-NEXT: movt32 a2, a0
; CSKY-NEXT: st32.h a0, (sp, 6)
; CSKY-NEXT: st16.w l3, (sp, 0)
; CSKY-NEXT: mov16 a0, l1
; CSKY-NEXT: addi32 a1, sp, 6
; CSKY-NEXT: movi16 a3, 4
; CSKY-NEXT: jsri32 [.LCPI93_0]
; CSKY-NEXT: mov16 a1, a0
; CSKY-NEXT: ld32.h a0, (sp, 6)
; CSKY-NEXT: bez32 a1, .LBB93_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 20
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI93_0:
; CSKY-NEXT: .long __atomic_compare_exchange_2
%1 = atomicrmw max i16* %a, i16 %b acq_rel
ret i16 %1
}
; i16 atomicrmw max, seq_cst: compare-exchange loop with success order
; (a3) = 5 (seq_cst) and failure order (on the stack, from l3) = 5
; (seq_cst).
define i16 @atomicrmw_max_i16_seq_cst(i16 *%a, i16 %b) nounwind {
; CSKY-LABEL: atomicrmw_max_i16_seq_cst:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 20
; CSKY-NEXT: st16.w l3, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: mov16 l0, a1
; CSKY-NEXT: mov16 l1, a0
; CSKY-NEXT: ld16.h a0, (a0, 0)
; CSKY-NEXT: sexth16 l2, a1
; CSKY-NEXT: movi16 l3, 5
; CSKY-NEXT: .LBB94_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: sexth16 a1, a0
; CSKY-NEXT: cmplt16 l2, a1
; CSKY-NEXT: mov16 a2, l0
; CSKY-NEXT: movt32 a2, a0
; CSKY-NEXT: st32.h a0, (sp, 6)
; CSKY-NEXT: st16.w l3, (sp, 0)
; CSKY-NEXT: mov16 a0, l1
; CSKY-NEXT: addi32 a1, sp, 6
; CSKY-NEXT: movi16 a3, 5
; CSKY-NEXT: jsri32 [.LCPI94_0]
; CSKY-NEXT: mov16 a1, a0
; CSKY-NEXT: ld32.h a0, (sp, 6)
; CSKY-NEXT: bez32 a1, .LBB94_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 20
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI94_0:
; CSKY-NEXT: .long __atomic_compare_exchange_2
%1 = atomicrmw max i16* %a, i16 %b seq_cst
ret i16 %1
}
; i16 atomicrmw min, monotonic: expands to a compare-exchange loop; like
; max but with movf32 (condition false) selecting the signed minimum.
; Success order (a3) = 0 and failure order (on the stack, from l3) = 0.
define i16 @atomicrmw_min_i16_monotonic(i16 *%a, i16 %b) nounwind {
; CSKY-LABEL: atomicrmw_min_i16_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 20
; CSKY-NEXT: st16.w l3, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: mov16 l0, a1
; CSKY-NEXT: mov16 l1, a0
; CSKY-NEXT: ld16.h a0, (a0, 0)
; CSKY-NEXT: sexth16 l2, a1
; CSKY-NEXT: movi16 l3, 0
; CSKY-NEXT: .LBB95_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: sexth16 a1, a0
; CSKY-NEXT: cmplt16 l2, a1
; CSKY-NEXT: mov16 a2, l0
; CSKY-NEXT: movf32 a2, a0
; CSKY-NEXT: st32.h a0, (sp, 6)
; CSKY-NEXT: st16.w l3, (sp, 0)
; CSKY-NEXT: mov16 a0, l1
; CSKY-NEXT: addi32 a1, sp, 6
; CSKY-NEXT: movi16 a3, 0
; CSKY-NEXT: jsri32 [.LCPI95_0]
; CSKY-NEXT: mov16 a1, a0
; CSKY-NEXT: ld32.h a0, (sp, 6)
; CSKY-NEXT: bez32 a1, .LBB95_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 20
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI95_0:
; CSKY-NEXT: .long __atomic_compare_exchange_2
%1 = atomicrmw min i16* %a, i16 %b monotonic
ret i16 %1
}
; i16 atomicrmw min, acquire: compare-exchange loop (signed compare,
; movf32 selects the minimum) with success order (a3) = 2 (acquire) and
; failure order (on the stack, from l3) = 2 (acquire).
define i16 @atomicrmw_min_i16_acquire(i16 *%a, i16 %b) nounwind {
; CSKY-LABEL: atomicrmw_min_i16_acquire:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 20
; CSKY-NEXT: st16.w l3, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: mov16 l0, a1
; CSKY-NEXT: mov16 l1, a0
; CSKY-NEXT: ld16.h a0, (a0, 0)
; CSKY-NEXT: sexth16 l2, a1
; CSKY-NEXT: movi16 l3, 2
; CSKY-NEXT: .LBB96_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: sexth16 a1, a0
; CSKY-NEXT: cmplt16 l2, a1
; CSKY-NEXT: mov16 a2, l0
; CSKY-NEXT: movf32 a2, a0
; CSKY-NEXT: st32.h a0, (sp, 6)
; CSKY-NEXT: st16.w l3, (sp, 0)
; CSKY-NEXT: mov16 a0, l1
; CSKY-NEXT: addi32 a1, sp, 6
; CSKY-NEXT: movi16 a3, 2
; CSKY-NEXT: jsri32 [.LCPI96_0]
; CSKY-NEXT: mov16 a1, a0
; CSKY-NEXT: ld32.h a0, (sp, 6)
; CSKY-NEXT: bez32 a1, .LBB96_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 20
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI96_0:
; CSKY-NEXT: .long __atomic_compare_exchange_2
%1 = atomicrmw min i16* %a, i16 %b acquire
ret i16 %1
}
; i16 atomicrmw min, release: compare-exchange loop with success order
; (a3) = 3 (release) and failure order (on the stack, from l3) = 0
; (monotonic) — failure ordering cannot be release, so it is weakened.
define i16 @atomicrmw_min_i16_release(i16 *%a, i16 %b) nounwind {
; CSKY-LABEL: atomicrmw_min_i16_release:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 20
; CSKY-NEXT: st16.w l3, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: mov16 l0, a1
; CSKY-NEXT: mov16 l1, a0
; CSKY-NEXT: ld16.h a0, (a0, 0)
; CSKY-NEXT: sexth16 l2, a1
; CSKY-NEXT: movi16 l3, 0
; CSKY-NEXT: .LBB97_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: sexth16 a1, a0
; CSKY-NEXT: cmplt16 l2, a1
; CSKY-NEXT: mov16 a2, l0
; CSKY-NEXT: movf32 a2, a0
; CSKY-NEXT: st32.h a0, (sp, 6)
; CSKY-NEXT: st16.w l3, (sp, 0)
; CSKY-NEXT: mov16 a0, l1
; CSKY-NEXT: addi32 a1, sp, 6
; CSKY-NEXT: movi16 a3, 3
; CSKY-NEXT: jsri32 [.LCPI97_0]
; CSKY-NEXT: mov16 a1, a0
; CSKY-NEXT: ld32.h a0, (sp, 6)
; CSKY-NEXT: bez32 a1, .LBB97_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 20
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI97_0:
; CSKY-NEXT: .long __atomic_compare_exchange_2
%1 = atomicrmw min i16* %a, i16 %b release
ret i16 %1
}
; i16 atomicrmw min, acq_rel: compare-exchange loop with success order
; (a3) = 4 (acq_rel) and failure order (on the stack, from l3) = 2
; (acquire) — acq_rel is weakened to acquire for the failure path.
define i16 @atomicrmw_min_i16_acq_rel(i16 *%a, i16 %b) nounwind {
; CSKY-LABEL: atomicrmw_min_i16_acq_rel:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 20
; CSKY-NEXT: st16.w l3, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: mov16 l0, a1
; CSKY-NEXT: mov16 l1, a0
; CSKY-NEXT: ld16.h a0, (a0, 0)
; CSKY-NEXT: sexth16 l2, a1
; CSKY-NEXT: movi16 l3, 2
; CSKY-NEXT: .LBB98_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: sexth16 a1, a0
; CSKY-NEXT: cmplt16 l2, a1
; CSKY-NEXT: mov16 a2, l0
; CSKY-NEXT: movf32 a2, a0
; CSKY-NEXT: st32.h a0, (sp, 6)
; CSKY-NEXT: st16.w l3, (sp, 0)
; CSKY-NEXT: mov16 a0, l1
; CSKY-NEXT: addi32 a1, sp, 6
; CSKY-NEXT: movi16 a3, 4
; CSKY-NEXT: jsri32 [.LCPI98_0]
; CSKY-NEXT: mov16 a1, a0
; CSKY-NEXT: ld32.h a0, (sp, 6)
; CSKY-NEXT: bez32 a1, .LBB98_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 20
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI98_0:
; CSKY-NEXT: .long __atomic_compare_exchange_2
%1 = atomicrmw min i16* %a, i16 %b acq_rel
ret i16 %1
}
; i16 atomicrmw min, seq_cst: compare-exchange loop with success order
; (a3) = 5 (seq_cst) and failure order (on the stack, from l3) = 5
; (seq_cst).
define i16 @atomicrmw_min_i16_seq_cst(i16 *%a, i16 %b) nounwind {
; CSKY-LABEL: atomicrmw_min_i16_seq_cst:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 20
; CSKY-NEXT: st16.w l3, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: mov16 l0, a1
; CSKY-NEXT: mov16 l1, a0
; CSKY-NEXT: ld16.h a0, (a0, 0)
; CSKY-NEXT: sexth16 l2, a1
; CSKY-NEXT: movi16 l3, 5
; CSKY-NEXT: .LBB99_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: sexth16 a1, a0
; CSKY-NEXT: cmplt16 l2, a1
; CSKY-NEXT: mov16 a2, l0
; CSKY-NEXT: movf32 a2, a0
; CSKY-NEXT: st32.h a0, (sp, 6)
; CSKY-NEXT: st16.w l3, (sp, 0)
; CSKY-NEXT: mov16 a0, l1
; CSKY-NEXT: addi32 a1, sp, 6
; CSKY-NEXT: movi16 a3, 5
; CSKY-NEXT: jsri32 [.LCPI99_0]
; CSKY-NEXT: mov16 a1, a0
; CSKY-NEXT: ld32.h a0, (sp, 6)
; CSKY-NEXT: bez32 a1, .LBB99_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 20
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI99_0:
; CSKY-NEXT: .long __atomic_compare_exchange_2
%1 = atomicrmw min i16* %a, i16 %b seq_cst
ret i16 %1
}
; i16 atomicrmw umax, monotonic: expands to a compare-exchange loop using
; an unsigned compare (zexth16 + cmphs16, with movf32 selecting the
; unsigned max) around __atomic_compare_exchange_2. Success order (a3) = 0
; and failure order (on the stack, from l3) = 0.
define i16 @atomicrmw_umax_i16_monotonic(i16 *%a, i16 %b) nounwind {
; CSKY-LABEL: atomicrmw_umax_i16_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 20
; CSKY-NEXT: st16.w l3, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: mov16 l0, a1
; CSKY-NEXT: mov16 l1, a0
; CSKY-NEXT: ld16.h a0, (a0, 0)
; CSKY-NEXT: zexth16 l2, a1
; CSKY-NEXT: movi16 l3, 0
; CSKY-NEXT: .LBB100_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: zexth16 a1, a0
; CSKY-NEXT: cmphs16 l2, a1
; CSKY-NEXT: mov16 a2, l0
; CSKY-NEXT: movf32 a2, a0
; CSKY-NEXT: st32.h a0, (sp, 6)
; CSKY-NEXT: st16.w l3, (sp, 0)
; CSKY-NEXT: mov16 a0, l1
; CSKY-NEXT: addi32 a1, sp, 6
; CSKY-NEXT: movi16 a3, 0
; CSKY-NEXT: jsri32 [.LCPI100_0]
; CSKY-NEXT: mov16 a1, a0
; CSKY-NEXT: ld32.h a0, (sp, 6)
; CSKY-NEXT: bez32 a1, .LBB100_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 20
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI100_0:
; CSKY-NEXT: .long __atomic_compare_exchange_2
%1 = atomicrmw umax i16* %a, i16 %b monotonic
ret i16 %1
}
; i16 atomicrmw umax, acquire: compare-exchange loop (unsigned compare via
; zexth16/cmphs16, movf32 selects the max) with success order (a3) = 2
; (acquire) and failure order (on the stack, from l3) = 2 (acquire).
define i16 @atomicrmw_umax_i16_acquire(i16 *%a, i16 %b) nounwind {
; CSKY-LABEL: atomicrmw_umax_i16_acquire:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 20
; CSKY-NEXT: st16.w l3, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: mov16 l0, a1
; CSKY-NEXT: mov16 l1, a0
; CSKY-NEXT: ld16.h a0, (a0, 0)
; CSKY-NEXT: zexth16 l2, a1
; CSKY-NEXT: movi16 l3, 2
; CSKY-NEXT: .LBB101_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: zexth16 a1, a0
; CSKY-NEXT: cmphs16 l2, a1
; CSKY-NEXT: mov16 a2, l0
; CSKY-NEXT: movf32 a2, a0
; CSKY-NEXT: st32.h a0, (sp, 6)
; CSKY-NEXT: st16.w l3, (sp, 0)
; CSKY-NEXT: mov16 a0, l1
; CSKY-NEXT: addi32 a1, sp, 6
; CSKY-NEXT: movi16 a3, 2
; CSKY-NEXT: jsri32 [.LCPI101_0]
; CSKY-NEXT: mov16 a1, a0
; CSKY-NEXT: ld32.h a0, (sp, 6)
; CSKY-NEXT: bez32 a1, .LBB101_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 20
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI101_0:
; CSKY-NEXT: .long __atomic_compare_exchange_2
%1 = atomicrmw umax i16* %a, i16 %b acquire
ret i16 %1
}
; i16 atomicrmw umax, release: compare-exchange loop with success order
; (a3) = 3 (release) and failure order (on the stack, from l3) = 0
; (monotonic) — failure ordering cannot be release, so it is weakened.
define i16 @atomicrmw_umax_i16_release(i16 *%a, i16 %b) nounwind {
; CSKY-LABEL: atomicrmw_umax_i16_release:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 20
; CSKY-NEXT: st16.w l3, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: mov16 l0, a1
; CSKY-NEXT: mov16 l1, a0
; CSKY-NEXT: ld16.h a0, (a0, 0)
; CSKY-NEXT: zexth16 l2, a1
; CSKY-NEXT: movi16 l3, 0
; CSKY-NEXT: .LBB102_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: zexth16 a1, a0
; CSKY-NEXT: cmphs16 l2, a1
; CSKY-NEXT: mov16 a2, l0
; CSKY-NEXT: movf32 a2, a0
; CSKY-NEXT: st32.h a0, (sp, 6)
; CSKY-NEXT: st16.w l3, (sp, 0)
; CSKY-NEXT: mov16 a0, l1
; CSKY-NEXT: addi32 a1, sp, 6
; CSKY-NEXT: movi16 a3, 3
; CSKY-NEXT: jsri32 [.LCPI102_0]
; CSKY-NEXT: mov16 a1, a0
; CSKY-NEXT: ld32.h a0, (sp, 6)
; CSKY-NEXT: bez32 a1, .LBB102_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 20
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI102_0:
; CSKY-NEXT: .long __atomic_compare_exchange_2
%1 = atomicrmw umax i16* %a, i16 %b release
ret i16 %1
}
; i16 atomicrmw umax, acq_rel: compare-exchange loop with success order
; (a3) = 4 (acq_rel) and failure order (on the stack, from l3) = 2
; (acquire) — acq_rel is weakened to acquire for the failure path.
define i16 @atomicrmw_umax_i16_acq_rel(i16 *%a, i16 %b) nounwind {
; CSKY-LABEL: atomicrmw_umax_i16_acq_rel:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 20
; CSKY-NEXT: st16.w l3, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: mov16 l0, a1
; CSKY-NEXT: mov16 l1, a0
; CSKY-NEXT: ld16.h a0, (a0, 0)
; CSKY-NEXT: zexth16 l2, a1
; CSKY-NEXT: movi16 l3, 2
; CSKY-NEXT: .LBB103_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: zexth16 a1, a0
; CSKY-NEXT: cmphs16 l2, a1
; CSKY-NEXT: mov16 a2, l0
; CSKY-NEXT: movf32 a2, a0
; CSKY-NEXT: st32.h a0, (sp, 6)
; CSKY-NEXT: st16.w l3, (sp, 0)
; CSKY-NEXT: mov16 a0, l1
; CSKY-NEXT: addi32 a1, sp, 6
; CSKY-NEXT: movi16 a3, 4
; CSKY-NEXT: jsri32 [.LCPI103_0]
; CSKY-NEXT: mov16 a1, a0
; CSKY-NEXT: ld32.h a0, (sp, 6)
; CSKY-NEXT: bez32 a1, .LBB103_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 20
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI103_0:
; CSKY-NEXT: .long __atomic_compare_exchange_2
%1 = atomicrmw umax i16* %a, i16 %b acq_rel
ret i16 %1
}
; i16 atomicrmw umax, seq_cst: compare-exchange loop with success order
; (a3) = 5 (seq_cst) and failure order (on the stack, from l3) = 5
; (seq_cst).
define i16 @atomicrmw_umax_i16_seq_cst(i16 *%a, i16 %b) nounwind {
; CSKY-LABEL: atomicrmw_umax_i16_seq_cst:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 20
; CSKY-NEXT: st16.w l3, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: mov16 l0, a1
; CSKY-NEXT: mov16 l1, a0
; CSKY-NEXT: ld16.h a0, (a0, 0)
; CSKY-NEXT: zexth16 l2, a1
; CSKY-NEXT: movi16 l3, 5
; CSKY-NEXT: .LBB104_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: zexth16 a1, a0
; CSKY-NEXT: cmphs16 l2, a1
; CSKY-NEXT: mov16 a2, l0
; CSKY-NEXT: movf32 a2, a0
; CSKY-NEXT: st32.h a0, (sp, 6)
; CSKY-NEXT: st16.w l3, (sp, 0)
; CSKY-NEXT: mov16 a0, l1
; CSKY-NEXT: addi32 a1, sp, 6
; CSKY-NEXT: movi16 a3, 5
; CSKY-NEXT: jsri32 [.LCPI104_0]
; CSKY-NEXT: mov16 a1, a0
; CSKY-NEXT: ld32.h a0, (sp, 6)
; CSKY-NEXT: bez32 a1, .LBB104_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 20
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI104_0:
; CSKY-NEXT: .long __atomic_compare_exchange_2
%1 = atomicrmw umax i16* %a, i16 %b seq_cst
ret i16 %1
}
; umin i16 at each ordering: expands to a CAS loop on __atomic_compare_exchange_2
; (no sub-word atomics). Unlike umax, the select keeps the new value on
; "old >= new" via movt32. Success ordering is the a3 immediate; failure
; ordering (stored to the stack via l3) is legally weakened: release -> monotonic
; (0) and acq_rel -> acquire (2).
define i16 @atomicrmw_umin_i16_monotonic(i16 *%a, i16 %b) nounwind {
; CSKY-LABEL: atomicrmw_umin_i16_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 20
; CSKY-NEXT: st16.w l3, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: mov16 l0, a1
; CSKY-NEXT: mov16 l1, a0
; CSKY-NEXT: ld16.h a0, (a0, 0)
; CSKY-NEXT: zexth16 l2, a1
; CSKY-NEXT: movi16 l3, 0
; CSKY-NEXT: .LBB105_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: zexth16 a1, a0
; CSKY-NEXT: cmphs16 l2, a1
; CSKY-NEXT: mov16 a2, l0
; CSKY-NEXT: movt32 a2, a0
; CSKY-NEXT: st32.h a0, (sp, 6)
; CSKY-NEXT: st16.w l3, (sp, 0)
; CSKY-NEXT: mov16 a0, l1
; CSKY-NEXT: addi32 a1, sp, 6
; CSKY-NEXT: movi16 a3, 0
; CSKY-NEXT: jsri32 [.LCPI105_0]
; CSKY-NEXT: mov16 a1, a0
; CSKY-NEXT: ld32.h a0, (sp, 6)
; CSKY-NEXT: bez32 a1, .LBB105_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 20
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI105_0:
; CSKY-NEXT: .long __atomic_compare_exchange_2
%1 = atomicrmw umin i16* %a, i16 %b monotonic
ret i16 %1
}
define i16 @atomicrmw_umin_i16_acquire(i16 *%a, i16 %b) nounwind {
; CSKY-LABEL: atomicrmw_umin_i16_acquire:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 20
; CSKY-NEXT: st16.w l3, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: mov16 l0, a1
; CSKY-NEXT: mov16 l1, a0
; CSKY-NEXT: ld16.h a0, (a0, 0)
; CSKY-NEXT: zexth16 l2, a1
; CSKY-NEXT: movi16 l3, 2
; CSKY-NEXT: .LBB106_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: zexth16 a1, a0
; CSKY-NEXT: cmphs16 l2, a1
; CSKY-NEXT: mov16 a2, l0
; CSKY-NEXT: movt32 a2, a0
; CSKY-NEXT: st32.h a0, (sp, 6)
; CSKY-NEXT: st16.w l3, (sp, 0)
; CSKY-NEXT: mov16 a0, l1
; CSKY-NEXT: addi32 a1, sp, 6
; CSKY-NEXT: movi16 a3, 2
; CSKY-NEXT: jsri32 [.LCPI106_0]
; CSKY-NEXT: mov16 a1, a0
; CSKY-NEXT: ld32.h a0, (sp, 6)
; CSKY-NEXT: bez32 a1, .LBB106_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 20
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI106_0:
; CSKY-NEXT: .long __atomic_compare_exchange_2
%1 = atomicrmw umin i16* %a, i16 %b acquire
ret i16 %1
}
define i16 @atomicrmw_umin_i16_release(i16 *%a, i16 %b) nounwind {
; CSKY-LABEL: atomicrmw_umin_i16_release:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 20
; CSKY-NEXT: st16.w l3, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: mov16 l0, a1
; CSKY-NEXT: mov16 l1, a0
; CSKY-NEXT: ld16.h a0, (a0, 0)
; CSKY-NEXT: zexth16 l2, a1
; CSKY-NEXT: movi16 l3, 0
; CSKY-NEXT: .LBB107_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: zexth16 a1, a0
; CSKY-NEXT: cmphs16 l2, a1
; CSKY-NEXT: mov16 a2, l0
; CSKY-NEXT: movt32 a2, a0
; CSKY-NEXT: st32.h a0, (sp, 6)
; CSKY-NEXT: st16.w l3, (sp, 0)
; CSKY-NEXT: mov16 a0, l1
; CSKY-NEXT: addi32 a1, sp, 6
; CSKY-NEXT: movi16 a3, 3
; CSKY-NEXT: jsri32 [.LCPI107_0]
; CSKY-NEXT: mov16 a1, a0
; CSKY-NEXT: ld32.h a0, (sp, 6)
; CSKY-NEXT: bez32 a1, .LBB107_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 20
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI107_0:
; CSKY-NEXT: .long __atomic_compare_exchange_2
%1 = atomicrmw umin i16* %a, i16 %b release
ret i16 %1
}
define i16 @atomicrmw_umin_i16_acq_rel(i16 *%a, i16 %b) nounwind {
; CSKY-LABEL: atomicrmw_umin_i16_acq_rel:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 20
; CSKY-NEXT: st16.w l3, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: mov16 l0, a1
; CSKY-NEXT: mov16 l1, a0
; CSKY-NEXT: ld16.h a0, (a0, 0)
; CSKY-NEXT: zexth16 l2, a1
; CSKY-NEXT: movi16 l3, 2
; CSKY-NEXT: .LBB108_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: zexth16 a1, a0
; CSKY-NEXT: cmphs16 l2, a1
; CSKY-NEXT: mov16 a2, l0
; CSKY-NEXT: movt32 a2, a0
; CSKY-NEXT: st32.h a0, (sp, 6)
; CSKY-NEXT: st16.w l3, (sp, 0)
; CSKY-NEXT: mov16 a0, l1
; CSKY-NEXT: addi32 a1, sp, 6
; CSKY-NEXT: movi16 a3, 4
; CSKY-NEXT: jsri32 [.LCPI108_0]
; CSKY-NEXT: mov16 a1, a0
; CSKY-NEXT: ld32.h a0, (sp, 6)
; CSKY-NEXT: bez32 a1, .LBB108_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 20
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI108_0:
; CSKY-NEXT: .long __atomic_compare_exchange_2
%1 = atomicrmw umin i16* %a, i16 %b acq_rel
ret i16 %1
}
define i16 @atomicrmw_umin_i16_seq_cst(i16 *%a, i16 %b) nounwind {
; CSKY-LABEL: atomicrmw_umin_i16_seq_cst:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 20
; CSKY-NEXT: st16.w l3, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: mov16 l0, a1
; CSKY-NEXT: mov16 l1, a0
; CSKY-NEXT: ld16.h a0, (a0, 0)
; CSKY-NEXT: zexth16 l2, a1
; CSKY-NEXT: movi16 l3, 5
; CSKY-NEXT: .LBB109_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: zexth16 a1, a0
; CSKY-NEXT: cmphs16 l2, a1
; CSKY-NEXT: mov16 a2, l0
; CSKY-NEXT: movt32 a2, a0
; CSKY-NEXT: st32.h a0, (sp, 6)
; CSKY-NEXT: st16.w l3, (sp, 0)
; CSKY-NEXT: mov16 a0, l1
; CSKY-NEXT: addi32 a1, sp, 6
; CSKY-NEXT: movi16 a3, 5
; CSKY-NEXT: jsri32 [.LCPI109_0]
; CSKY-NEXT: mov16 a1, a0
; CSKY-NEXT: ld32.h a0, (sp, 6)
; CSKY-NEXT: bez32 a1, .LBB109_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 20
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI109_0:
; CSKY-NEXT: .long __atomic_compare_exchange_2
%1 = atomicrmw umin i16* %a, i16 %b seq_cst
ret i16 %1
}
; xchg i32 at each ordering: lowered to the __atomic_exchange_4 libcall; the
; only per-ordering difference is the memory-order immediate in a2
; (0/2/3/4/5 = monotonic/acquire/release/acq_rel/seq_cst).
define i32 @atomicrmw_xchg_i32_monotonic(i32* %a, i32 %b) nounwind {
; CSKY-LABEL: atomicrmw_xchg_i32_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 0
; CSKY-NEXT: jsri32 [.LCPI110_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI110_0:
; CSKY-NEXT: .long __atomic_exchange_4
%1 = atomicrmw xchg i32* %a, i32 %b monotonic
ret i32 %1
}
define i32 @atomicrmw_xchg_i32_acquire(i32* %a, i32 %b) nounwind {
; CSKY-LABEL: atomicrmw_xchg_i32_acquire:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 2
; CSKY-NEXT: jsri32 [.LCPI111_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI111_0:
; CSKY-NEXT: .long __atomic_exchange_4
%1 = atomicrmw xchg i32* %a, i32 %b acquire
ret i32 %1
}
define i32 @atomicrmw_xchg_i32_release(i32* %a, i32 %b) nounwind {
; CSKY-LABEL: atomicrmw_xchg_i32_release:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 3
; CSKY-NEXT: jsri32 [.LCPI112_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI112_0:
; CSKY-NEXT: .long __atomic_exchange_4
%1 = atomicrmw xchg i32* %a, i32 %b release
ret i32 %1
}
define i32 @atomicrmw_xchg_i32_acq_rel(i32* %a, i32 %b) nounwind {
; CSKY-LABEL: atomicrmw_xchg_i32_acq_rel:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 4
; CSKY-NEXT: jsri32 [.LCPI113_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI113_0:
; CSKY-NEXT: .long __atomic_exchange_4
%1 = atomicrmw xchg i32* %a, i32 %b acq_rel
ret i32 %1
}
define i32 @atomicrmw_xchg_i32_seq_cst(i32* %a, i32 %b) nounwind {
; CSKY-LABEL: atomicrmw_xchg_i32_seq_cst:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 5
; CSKY-NEXT: jsri32 [.LCPI114_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI114_0:
; CSKY-NEXT: .long __atomic_exchange_4
%1 = atomicrmw xchg i32* %a, i32 %b seq_cst
ret i32 %1
}
; add i32 at each ordering: lowered to the __atomic_fetch_add_4 libcall;
; a2 carries the memory-order immediate (0/2/3/4/5).
define i32 @atomicrmw_add_i32_monotonic(i32 *%a, i32 %b) nounwind {
; CSKY-LABEL: atomicrmw_add_i32_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 0
; CSKY-NEXT: jsri32 [.LCPI115_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI115_0:
; CSKY-NEXT: .long __atomic_fetch_add_4
%1 = atomicrmw add i32* %a, i32 %b monotonic
ret i32 %1
}
define i32 @atomicrmw_add_i32_acquire(i32 *%a, i32 %b) nounwind {
; CSKY-LABEL: atomicrmw_add_i32_acquire:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 2
; CSKY-NEXT: jsri32 [.LCPI116_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI116_0:
; CSKY-NEXT: .long __atomic_fetch_add_4
%1 = atomicrmw add i32* %a, i32 %b acquire
ret i32 %1
}
define i32 @atomicrmw_add_i32_release(i32 *%a, i32 %b) nounwind {
; CSKY-LABEL: atomicrmw_add_i32_release:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 3
; CSKY-NEXT: jsri32 [.LCPI117_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI117_0:
; CSKY-NEXT: .long __atomic_fetch_add_4
%1 = atomicrmw add i32* %a, i32 %b release
ret i32 %1
}
define i32 @atomicrmw_add_i32_acq_rel(i32 *%a, i32 %b) nounwind {
; CSKY-LABEL: atomicrmw_add_i32_acq_rel:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 4
; CSKY-NEXT: jsri32 [.LCPI118_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI118_0:
; CSKY-NEXT: .long __atomic_fetch_add_4
%1 = atomicrmw add i32* %a, i32 %b acq_rel
ret i32 %1
}
define i32 @atomicrmw_add_i32_seq_cst(i32 *%a, i32 %b) nounwind {
; CSKY-LABEL: atomicrmw_add_i32_seq_cst:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 5
; CSKY-NEXT: jsri32 [.LCPI119_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI119_0:
; CSKY-NEXT: .long __atomic_fetch_add_4
%1 = atomicrmw add i32* %a, i32 %b seq_cst
ret i32 %1
}
; sub i32 at each ordering: lowered to the __atomic_fetch_sub_4 libcall;
; a2 carries the memory-order immediate (0/2/3/4/5).
define i32 @atomicrmw_sub_i32_monotonic(i32* %a, i32 %b) nounwind {
; CSKY-LABEL: atomicrmw_sub_i32_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 0
; CSKY-NEXT: jsri32 [.LCPI120_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI120_0:
; CSKY-NEXT: .long __atomic_fetch_sub_4
%1 = atomicrmw sub i32* %a, i32 %b monotonic
ret i32 %1
}
define i32 @atomicrmw_sub_i32_acquire(i32* %a, i32 %b) nounwind {
; CSKY-LABEL: atomicrmw_sub_i32_acquire:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 2
; CSKY-NEXT: jsri32 [.LCPI121_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI121_0:
; CSKY-NEXT: .long __atomic_fetch_sub_4
%1 = atomicrmw sub i32* %a, i32 %b acquire
ret i32 %1
}
define i32 @atomicrmw_sub_i32_release(i32* %a, i32 %b) nounwind {
; CSKY-LABEL: atomicrmw_sub_i32_release:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 3
; CSKY-NEXT: jsri32 [.LCPI122_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI122_0:
; CSKY-NEXT: .long __atomic_fetch_sub_4
%1 = atomicrmw sub i32* %a, i32 %b release
ret i32 %1
}
define i32 @atomicrmw_sub_i32_acq_rel(i32* %a, i32 %b) nounwind {
; CSKY-LABEL: atomicrmw_sub_i32_acq_rel:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 4
; CSKY-NEXT: jsri32 [.LCPI123_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI123_0:
; CSKY-NEXT: .long __atomic_fetch_sub_4
%1 = atomicrmw sub i32* %a, i32 %b acq_rel
ret i32 %1
}
define i32 @atomicrmw_sub_i32_seq_cst(i32* %a, i32 %b) nounwind {
; CSKY-LABEL: atomicrmw_sub_i32_seq_cst:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 5
; CSKY-NEXT: jsri32 [.LCPI124_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI124_0:
; CSKY-NEXT: .long __atomic_fetch_sub_4
%1 = atomicrmw sub i32* %a, i32 %b seq_cst
ret i32 %1
}
; and i32 at each ordering: lowered to the __atomic_fetch_and_4 libcall;
; a2 carries the memory-order immediate (0/2/3/4/5).
define i32 @atomicrmw_and_i32_monotonic(i32 *%a, i32 %b) nounwind {
; CSKY-LABEL: atomicrmw_and_i32_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 0
; CSKY-NEXT: jsri32 [.LCPI125_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI125_0:
; CSKY-NEXT: .long __atomic_fetch_and_4
%1 = atomicrmw and i32* %a, i32 %b monotonic
ret i32 %1
}
define i32 @atomicrmw_and_i32_acquire(i32 *%a, i32 %b) nounwind {
; CSKY-LABEL: atomicrmw_and_i32_acquire:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 2
; CSKY-NEXT: jsri32 [.LCPI126_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI126_0:
; CSKY-NEXT: .long __atomic_fetch_and_4
%1 = atomicrmw and i32* %a, i32 %b acquire
ret i32 %1
}
define i32 @atomicrmw_and_i32_release(i32 *%a, i32 %b) nounwind {
; CSKY-LABEL: atomicrmw_and_i32_release:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 3
; CSKY-NEXT: jsri32 [.LCPI127_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI127_0:
; CSKY-NEXT: .long __atomic_fetch_and_4
%1 = atomicrmw and i32* %a, i32 %b release
ret i32 %1
}
define i32 @atomicrmw_and_i32_acq_rel(i32 *%a, i32 %b) nounwind {
; CSKY-LABEL: atomicrmw_and_i32_acq_rel:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 4
; CSKY-NEXT: jsri32 [.LCPI128_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI128_0:
; CSKY-NEXT: .long __atomic_fetch_and_4
%1 = atomicrmw and i32* %a, i32 %b acq_rel
ret i32 %1
}
define i32 @atomicrmw_and_i32_seq_cst(i32 *%a, i32 %b) nounwind {
; CSKY-LABEL: atomicrmw_and_i32_seq_cst:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 5
; CSKY-NEXT: jsri32 [.LCPI129_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI129_0:
; CSKY-NEXT: .long __atomic_fetch_and_4
%1 = atomicrmw and i32* %a, i32 %b seq_cst
ret i32 %1
}
; nand i32 at each ordering: lowered to the __atomic_fetch_nand_4 libcall;
; a2 carries the memory-order immediate (0/2/3/4/5).
define i32 @atomicrmw_nand_i32_monotonic(i32* %a, i32 %b) nounwind {
; CSKY-LABEL: atomicrmw_nand_i32_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 0
; CSKY-NEXT: jsri32 [.LCPI130_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI130_0:
; CSKY-NEXT: .long __atomic_fetch_nand_4
%1 = atomicrmw nand i32* %a, i32 %b monotonic
ret i32 %1
}
define i32 @atomicrmw_nand_i32_acquire(i32* %a, i32 %b) nounwind {
; CSKY-LABEL: atomicrmw_nand_i32_acquire:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 2
; CSKY-NEXT: jsri32 [.LCPI131_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI131_0:
; CSKY-NEXT: .long __atomic_fetch_nand_4
%1 = atomicrmw nand i32* %a, i32 %b acquire
ret i32 %1
}
define i32 @atomicrmw_nand_i32_release(i32* %a, i32 %b) nounwind {
; CSKY-LABEL: atomicrmw_nand_i32_release:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 3
; CSKY-NEXT: jsri32 [.LCPI132_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI132_0:
; CSKY-NEXT: .long __atomic_fetch_nand_4
%1 = atomicrmw nand i32* %a, i32 %b release
ret i32 %1
}
define i32 @atomicrmw_nand_i32_acq_rel(i32* %a, i32 %b) nounwind {
; CSKY-LABEL: atomicrmw_nand_i32_acq_rel:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 4
; CSKY-NEXT: jsri32 [.LCPI133_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI133_0:
; CSKY-NEXT: .long __atomic_fetch_nand_4
%1 = atomicrmw nand i32* %a, i32 %b acq_rel
ret i32 %1
}
define i32 @atomicrmw_nand_i32_seq_cst(i32* %a, i32 %b) nounwind {
; CSKY-LABEL: atomicrmw_nand_i32_seq_cst:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 5
; CSKY-NEXT: jsri32 [.LCPI134_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI134_0:
; CSKY-NEXT: .long __atomic_fetch_nand_4
%1 = atomicrmw nand i32* %a, i32 %b seq_cst
ret i32 %1
}
; or i32 at each ordering: lowered to the __atomic_fetch_or_4 libcall;
; a2 carries the memory-order immediate (0/2/3/4/5).
define i32 @atomicrmw_or_i32_monotonic(i32 *%a, i32 %b) nounwind {
; CSKY-LABEL: atomicrmw_or_i32_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 0
; CSKY-NEXT: jsri32 [.LCPI135_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI135_0:
; CSKY-NEXT: .long __atomic_fetch_or_4
%1 = atomicrmw or i32* %a, i32 %b monotonic
ret i32 %1
}
define i32 @atomicrmw_or_i32_acquire(i32 *%a, i32 %b) nounwind {
; CSKY-LABEL: atomicrmw_or_i32_acquire:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 2
; CSKY-NEXT: jsri32 [.LCPI136_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI136_0:
; CSKY-NEXT: .long __atomic_fetch_or_4
%1 = atomicrmw or i32* %a, i32 %b acquire
ret i32 %1
}
define i32 @atomicrmw_or_i32_release(i32 *%a, i32 %b) nounwind {
; CSKY-LABEL: atomicrmw_or_i32_release:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 3
; CSKY-NEXT: jsri32 [.LCPI137_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI137_0:
; CSKY-NEXT: .long __atomic_fetch_or_4
%1 = atomicrmw or i32* %a, i32 %b release
ret i32 %1
}
define i32 @atomicrmw_or_i32_acq_rel(i32 *%a, i32 %b) nounwind {
; CSKY-LABEL: atomicrmw_or_i32_acq_rel:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 4
; CSKY-NEXT: jsri32 [.LCPI138_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI138_0:
; CSKY-NEXT: .long __atomic_fetch_or_4
%1 = atomicrmw or i32* %a, i32 %b acq_rel
ret i32 %1
}
define i32 @atomicrmw_or_i32_seq_cst(i32 *%a, i32 %b) nounwind {
; CSKY-LABEL: atomicrmw_or_i32_seq_cst:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 5
; CSKY-NEXT: jsri32 [.LCPI139_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI139_0:
; CSKY-NEXT: .long __atomic_fetch_or_4
%1 = atomicrmw or i32* %a, i32 %b seq_cst
ret i32 %1
}
; xor i32 at each ordering: lowered to the __atomic_fetch_xor_4 libcall;
; a2 carries the memory-order immediate (0/2/3/4/5).
define i32 @atomicrmw_xor_i32_monotonic(i32 *%a, i32 %b) nounwind {
; CSKY-LABEL: atomicrmw_xor_i32_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 0
; CSKY-NEXT: jsri32 [.LCPI140_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI140_0:
; CSKY-NEXT: .long __atomic_fetch_xor_4
%1 = atomicrmw xor i32* %a, i32 %b monotonic
ret i32 %1
}
define i32 @atomicrmw_xor_i32_acquire(i32 *%a, i32 %b) nounwind {
; CSKY-LABEL: atomicrmw_xor_i32_acquire:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 2
; CSKY-NEXT: jsri32 [.LCPI141_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI141_0:
; CSKY-NEXT: .long __atomic_fetch_xor_4
%1 = atomicrmw xor i32* %a, i32 %b acquire
ret i32 %1
}
define i32 @atomicrmw_xor_i32_release(i32 *%a, i32 %b) nounwind {
; CSKY-LABEL: atomicrmw_xor_i32_release:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 3
; CSKY-NEXT: jsri32 [.LCPI142_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI142_0:
; CSKY-NEXT: .long __atomic_fetch_xor_4
%1 = atomicrmw xor i32* %a, i32 %b release
ret i32 %1
}
define i32 @atomicrmw_xor_i32_acq_rel(i32 *%a, i32 %b) nounwind {
; CSKY-LABEL: atomicrmw_xor_i32_acq_rel:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 4
; CSKY-NEXT: jsri32 [.LCPI143_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI143_0:
; CSKY-NEXT: .long __atomic_fetch_xor_4
%1 = atomicrmw xor i32* %a, i32 %b acq_rel
ret i32 %1
}
define i32 @atomicrmw_xor_i32_seq_cst(i32 *%a, i32 %b) nounwind {
; CSKY-LABEL: atomicrmw_xor_i32_seq_cst:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a2, 5
; CSKY-NEXT: jsri32 [.LCPI144_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI144_0:
; CSKY-NEXT: .long __atomic_fetch_xor_4
%1 = atomicrmw xor i32* %a, i32 %b seq_cst
ret i32 %1
}
define i32 @atomicrmw_max_i32_monotonic(i32 *%a, i32 %b) nounwind {
; CSKY-LABEL: atomicrmw_max_i32_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 16
; CSKY-NEXT: st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: mov16 l0, a1
; CSKY-NEXT: mov16 l1, a0
; CSKY-NEXT: ld16.w a0, (a0, 0)
; CSKY-NEXT: movi16 l2, 0
; CSKY-NEXT: .LBB145_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: cmplt16 l0, a0
; CSKY-NEXT: mov16 a2, l0
; CSKY-NEXT: movt32 a2, a0
; CSKY-NEXT: st16.w a0, (sp, 4)
; CSKY-NEXT: st16.w l2, (sp, 0)
; CSKY-NEXT: mov16 a0, l1
; CSKY-NEXT: addi16 a1, sp, 4
; CSKY-NEXT: movi16 a3, 0
; CSKY-NEXT: jsri32 [.LCPI145_0]
; CSKY-NEXT: mov16 a1, a0
; CSKY-NEXT: ld16.w a0, (sp, 4)
; CSKY-NEXT: bez32 a1, .LBB145_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 16
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI145_0:
; CSKY-NEXT: .long __atomic_compare_exchange_4
%1 = atomicrmw max i32* %a, i32 %b monotonic
ret i32 %1
}
define i32 @atomicrmw_max_i32_acquire(i32 *%a, i32 %b) nounwind {
; CSKY-LABEL: atomicrmw_max_i32_acquire:
; CSKY:       # %bb.0:
; CSKY-NEXT:    subi16 sp, sp, 16
; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT:    subi16 sp, sp, 8
; CSKY-NEXT:    mov16 l0, a1
; CSKY-NEXT:    mov16 l1, a0
; CSKY-NEXT:    ld16.w a0, (a0, 0)
; CSKY-NEXT:    movi16 l2, 2
; CSKY-NEXT:  .LBB146_1: # %atomicrmw.start
; CSKY-NEXT:    # =>This Inner Loop Header: Depth=1
; CSKY-NEXT:    cmplt16 l0, a0
; CSKY-NEXT:    mov16 a2, l0
; CSKY-NEXT:    movt32 a2, a0
; CSKY-NEXT:    st16.w a0, (sp, 4)
; CSKY-NEXT:    st16.w l2, (sp, 0)
; CSKY-NEXT:    mov16 a0, l1
; CSKY-NEXT:    addi16 a1, sp, 4
; CSKY-NEXT:    movi16 a3, 2
; CSKY-NEXT:    jsri32 [.LCPI146_0]
; CSKY-NEXT:    mov16 a1, a0
; CSKY-NEXT:    ld16.w a0, (sp, 4)
; CSKY-NEXT:    bez32 a1, .LBB146_1
; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
; CSKY-NEXT:    addi16 sp, sp, 8
; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT:    addi16 sp, sp, 16
; CSKY-NEXT:    rts16
; CSKY-NEXT:    .p2align 1
; CSKY-NEXT:  # %bb.3:
; CSKY-NEXT:    .p2align 2
; CSKY-NEXT:  .LCPI146_0:
; CSKY-NEXT:    .long __atomic_compare_exchange_4
; Signed max expands to a CAS retry loop: select the larger of old/new with
; cmplt16+movt32, then call __atomic_compare_exchange_4 (a3=2 acquire success
; order, l2=2 acquire failure order on the stack) until the CAS succeeds.
%1 = atomicrmw max i32* %a, i32 %b acquire
ret i32 %1
}
define i32 @atomicrmw_max_i32_release(i32 *%a, i32 %b) nounwind {
; CSKY-LABEL: atomicrmw_max_i32_release:
; CSKY:       # %bb.0:
; CSKY-NEXT:    subi16 sp, sp, 16
; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT:    subi16 sp, sp, 8
; CSKY-NEXT:    mov16 l0, a1
; CSKY-NEXT:    mov16 l1, a0
; CSKY-NEXT:    ld16.w a0, (a0, 0)
; CSKY-NEXT:    movi16 l2, 0
; CSKY-NEXT:  .LBB147_1: # %atomicrmw.start
; CSKY-NEXT:    # =>This Inner Loop Header: Depth=1
; CSKY-NEXT:    cmplt16 l0, a0
; CSKY-NEXT:    mov16 a2, l0
; CSKY-NEXT:    movt32 a2, a0
; CSKY-NEXT:    st16.w a0, (sp, 4)
; CSKY-NEXT:    st16.w l2, (sp, 0)
; CSKY-NEXT:    mov16 a0, l1
; CSKY-NEXT:    addi16 a1, sp, 4
; CSKY-NEXT:    movi16 a3, 3
; CSKY-NEXT:    jsri32 [.LCPI147_0]
; CSKY-NEXT:    mov16 a1, a0
; CSKY-NEXT:    ld16.w a0, (sp, 4)
; CSKY-NEXT:    bez32 a1, .LBB147_1
; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
; CSKY-NEXT:    addi16 sp, sp, 8
; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT:    addi16 sp, sp, 16
; CSKY-NEXT:    rts16
; CSKY-NEXT:    .p2align 1
; CSKY-NEXT:  # %bb.3:
; CSKY-NEXT:    .p2align 2
; CSKY-NEXT:  .LCPI147_0:
; CSKY-NEXT:    .long __atomic_compare_exchange_4
; Signed max CAS loop via __atomic_compare_exchange_4: a3=3 (release success
; order); failure order is l2=0 (monotonic), since a CAS failure cannot release.
%1 = atomicrmw max i32* %a, i32 %b release
ret i32 %1
}
define i32 @atomicrmw_max_i32_acq_rel(i32 *%a, i32 %b) nounwind {
; CSKY-LABEL: atomicrmw_max_i32_acq_rel:
; CSKY:       # %bb.0:
; CSKY-NEXT:    subi16 sp, sp, 16
; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT:    subi16 sp, sp, 8
; CSKY-NEXT:    mov16 l0, a1
; CSKY-NEXT:    mov16 l1, a0
; CSKY-NEXT:    ld16.w a0, (a0, 0)
; CSKY-NEXT:    movi16 l2, 2
; CSKY-NEXT:  .LBB148_1: # %atomicrmw.start
; CSKY-NEXT:    # =>This Inner Loop Header: Depth=1
; CSKY-NEXT:    cmplt16 l0, a0
; CSKY-NEXT:    mov16 a2, l0
; CSKY-NEXT:    movt32 a2, a0
; CSKY-NEXT:    st16.w a0, (sp, 4)
; CSKY-NEXT:    st16.w l2, (sp, 0)
; CSKY-NEXT:    mov16 a0, l1
; CSKY-NEXT:    addi16 a1, sp, 4
; CSKY-NEXT:    movi16 a3, 4
; CSKY-NEXT:    jsri32 [.LCPI148_0]
; CSKY-NEXT:    mov16 a1, a0
; CSKY-NEXT:    ld16.w a0, (sp, 4)
; CSKY-NEXT:    bez32 a1, .LBB148_1
; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
; CSKY-NEXT:    addi16 sp, sp, 8
; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT:    addi16 sp, sp, 16
; CSKY-NEXT:    rts16
; CSKY-NEXT:    .p2align 1
; CSKY-NEXT:  # %bb.3:
; CSKY-NEXT:    .p2align 2
; CSKY-NEXT:  .LCPI148_0:
; CSKY-NEXT:    .long __atomic_compare_exchange_4
; Signed max CAS loop via __atomic_compare_exchange_4: a3=4 (acq_rel success
; order); failure order is downgraded to l2=2 (acquire).
%1 = atomicrmw max i32* %a, i32 %b acq_rel
ret i32 %1
}
define i32 @atomicrmw_max_i32_seq_cst(i32 *%a, i32 %b) nounwind {
; CSKY-LABEL: atomicrmw_max_i32_seq_cst:
; CSKY:       # %bb.0:
; CSKY-NEXT:    subi16 sp, sp, 16
; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT:    subi16 sp, sp, 8
; CSKY-NEXT:    mov16 l0, a1
; CSKY-NEXT:    mov16 l1, a0
; CSKY-NEXT:    ld16.w a0, (a0, 0)
; CSKY-NEXT:    movi16 l2, 5
; CSKY-NEXT:  .LBB149_1: # %atomicrmw.start
; CSKY-NEXT:    # =>This Inner Loop Header: Depth=1
; CSKY-NEXT:    cmplt16 l0, a0
; CSKY-NEXT:    mov16 a2, l0
; CSKY-NEXT:    movt32 a2, a0
; CSKY-NEXT:    st16.w a0, (sp, 4)
; CSKY-NEXT:    st16.w l2, (sp, 0)
; CSKY-NEXT:    mov16 a0, l1
; CSKY-NEXT:    addi16 a1, sp, 4
; CSKY-NEXT:    movi16 a3, 5
; CSKY-NEXT:    jsri32 [.LCPI149_0]
; CSKY-NEXT:    mov16 a1, a0
; CSKY-NEXT:    ld16.w a0, (sp, 4)
; CSKY-NEXT:    bez32 a1, .LBB149_1
; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
; CSKY-NEXT:    addi16 sp, sp, 8
; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT:    addi16 sp, sp, 16
; CSKY-NEXT:    rts16
; CSKY-NEXT:    .p2align 1
; CSKY-NEXT:  # %bb.3:
; CSKY-NEXT:    .p2align 2
; CSKY-NEXT:  .LCPI149_0:
; CSKY-NEXT:    .long __atomic_compare_exchange_4
; Signed max CAS loop via __atomic_compare_exchange_4: a3=5 and l2=5 (seq_cst
; for both success and failure orders).
%1 = atomicrmw max i32* %a, i32 %b seq_cst
ret i32 %1
}
define i32 @atomicrmw_min_i32_monotonic(i32 *%a, i32 %b) nounwind {
; CSKY-LABEL: atomicrmw_min_i32_monotonic:
; CSKY:       # %bb.0:
; CSKY-NEXT:    subi16 sp, sp, 16
; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT:    subi16 sp, sp, 8
; CSKY-NEXT:    mov16 l0, a1
; CSKY-NEXT:    mov16 l1, a0
; CSKY-NEXT:    ld16.w a0, (a0, 0)
; CSKY-NEXT:    movi16 l2, 0
; CSKY-NEXT:  .LBB150_1: # %atomicrmw.start
; CSKY-NEXT:    # =>This Inner Loop Header: Depth=1
; CSKY-NEXT:    cmplt16 l0, a0
; CSKY-NEXT:    mov16 a2, l0
; CSKY-NEXT:    movf32 a2, a0
; CSKY-NEXT:    st16.w a0, (sp, 4)
; CSKY-NEXT:    st16.w l2, (sp, 0)
; CSKY-NEXT:    mov16 a0, l1
; CSKY-NEXT:    addi16 a1, sp, 4
; CSKY-NEXT:    movi16 a3, 0
; CSKY-NEXT:    jsri32 [.LCPI150_0]
; CSKY-NEXT:    mov16 a1, a0
; CSKY-NEXT:    ld16.w a0, (sp, 4)
; CSKY-NEXT:    bez32 a1, .LBB150_1
; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
; CSKY-NEXT:    addi16 sp, sp, 8
; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT:    addi16 sp, sp, 16
; CSKY-NEXT:    rts16
; CSKY-NEXT:    .p2align 1
; CSKY-NEXT:  # %bb.3:
; CSKY-NEXT:    .p2align 2
; CSKY-NEXT:  .LCPI150_0:
; CSKY-NEXT:    .long __atomic_compare_exchange_4
; Signed min expands to a CAS retry loop: same cmplt16 compare as max, but
; movf32 (move-if-false) picks the smaller value; a3=0 and l2=0 (monotonic
; success/failure orders) for the __atomic_compare_exchange_4 libcall.
%1 = atomicrmw min i32* %a, i32 %b monotonic
ret i32 %1
}
define i32 @atomicrmw_min_i32_acquire(i32 *%a, i32 %b) nounwind {
; CSKY-LABEL: atomicrmw_min_i32_acquire:
; CSKY:       # %bb.0:
; CSKY-NEXT:    subi16 sp, sp, 16
; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT:    subi16 sp, sp, 8
; CSKY-NEXT:    mov16 l0, a1
; CSKY-NEXT:    mov16 l1, a0
; CSKY-NEXT:    ld16.w a0, (a0, 0)
; CSKY-NEXT:    movi16 l2, 2
; CSKY-NEXT:  .LBB151_1: # %atomicrmw.start
; CSKY-NEXT:    # =>This Inner Loop Header: Depth=1
; CSKY-NEXT:    cmplt16 l0, a0
; CSKY-NEXT:    mov16 a2, l0
; CSKY-NEXT:    movf32 a2, a0
; CSKY-NEXT:    st16.w a0, (sp, 4)
; CSKY-NEXT:    st16.w l2, (sp, 0)
; CSKY-NEXT:    mov16 a0, l1
; CSKY-NEXT:    addi16 a1, sp, 4
; CSKY-NEXT:    movi16 a3, 2
; CSKY-NEXT:    jsri32 [.LCPI151_0]
; CSKY-NEXT:    mov16 a1, a0
; CSKY-NEXT:    ld16.w a0, (sp, 4)
; CSKY-NEXT:    bez32 a1, .LBB151_1
; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
; CSKY-NEXT:    addi16 sp, sp, 8
; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT:    addi16 sp, sp, 16
; CSKY-NEXT:    rts16
; CSKY-NEXT:    .p2align 1
; CSKY-NEXT:  # %bb.3:
; CSKY-NEXT:    .p2align 2
; CSKY-NEXT:  .LCPI151_0:
; CSKY-NEXT:    .long __atomic_compare_exchange_4
; Signed min CAS loop (cmplt16+movf32 selects the smaller value) via
; __atomic_compare_exchange_4: a3=2 and l2=2 (acquire success/failure orders).
%1 = atomicrmw min i32* %a, i32 %b acquire
ret i32 %1
}
define i32 @atomicrmw_min_i32_release(i32 *%a, i32 %b) nounwind {
; CSKY-LABEL: atomicrmw_min_i32_release:
; CSKY:       # %bb.0:
; CSKY-NEXT:    subi16 sp, sp, 16
; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT:    subi16 sp, sp, 8
; CSKY-NEXT:    mov16 l0, a1
; CSKY-NEXT:    mov16 l1, a0
; CSKY-NEXT:    ld16.w a0, (a0, 0)
; CSKY-NEXT:    movi16 l2, 0
; CSKY-NEXT:  .LBB152_1: # %atomicrmw.start
; CSKY-NEXT:    # =>This Inner Loop Header: Depth=1
; CSKY-NEXT:    cmplt16 l0, a0
; CSKY-NEXT:    mov16 a2, l0
; CSKY-NEXT:    movf32 a2, a0
; CSKY-NEXT:    st16.w a0, (sp, 4)
; CSKY-NEXT:    st16.w l2, (sp, 0)
; CSKY-NEXT:    mov16 a0, l1
; CSKY-NEXT:    addi16 a1, sp, 4
; CSKY-NEXT:    movi16 a3, 3
; CSKY-NEXT:    jsri32 [.LCPI152_0]
; CSKY-NEXT:    mov16 a1, a0
; CSKY-NEXT:    ld16.w a0, (sp, 4)
; CSKY-NEXT:    bez32 a1, .LBB152_1
; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
; CSKY-NEXT:    addi16 sp, sp, 8
; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT:    addi16 sp, sp, 16
; CSKY-NEXT:    rts16
; CSKY-NEXT:    .p2align 1
; CSKY-NEXT:  # %bb.3:
; CSKY-NEXT:    .p2align 2
; CSKY-NEXT:  .LCPI152_0:
; CSKY-NEXT:    .long __atomic_compare_exchange_4
; Signed min CAS loop via __atomic_compare_exchange_4: a3=3 (release success
; order); failure order is l2=0 (monotonic), since a CAS failure cannot release.
%1 = atomicrmw min i32* %a, i32 %b release
ret i32 %1
}
define i32 @atomicrmw_min_i32_acq_rel(i32 *%a, i32 %b) nounwind {
; CSKY-LABEL: atomicrmw_min_i32_acq_rel:
; CSKY:       # %bb.0:
; CSKY-NEXT:    subi16 sp, sp, 16
; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT:    subi16 sp, sp, 8
; CSKY-NEXT:    mov16 l0, a1
; CSKY-NEXT:    mov16 l1, a0
; CSKY-NEXT:    ld16.w a0, (a0, 0)
; CSKY-NEXT:    movi16 l2, 2
; CSKY-NEXT:  .LBB153_1: # %atomicrmw.start
; CSKY-NEXT:    # =>This Inner Loop Header: Depth=1
; CSKY-NEXT:    cmplt16 l0, a0
; CSKY-NEXT:    mov16 a2, l0
; CSKY-NEXT:    movf32 a2, a0
; CSKY-NEXT:    st16.w a0, (sp, 4)
; CSKY-NEXT:    st16.w l2, (sp, 0)
; CSKY-NEXT:    mov16 a0, l1
; CSKY-NEXT:    addi16 a1, sp, 4
; CSKY-NEXT:    movi16 a3, 4
; CSKY-NEXT:    jsri32 [.LCPI153_0]
; CSKY-NEXT:    mov16 a1, a0
; CSKY-NEXT:    ld16.w a0, (sp, 4)
; CSKY-NEXT:    bez32 a1, .LBB153_1
; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
; CSKY-NEXT:    addi16 sp, sp, 8
; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT:    addi16 sp, sp, 16
; CSKY-NEXT:    rts16
; CSKY-NEXT:    .p2align 1
; CSKY-NEXT:  # %bb.3:
; CSKY-NEXT:    .p2align 2
; CSKY-NEXT:  .LCPI153_0:
; CSKY-NEXT:    .long __atomic_compare_exchange_4
; Signed min CAS loop via __atomic_compare_exchange_4: a3=4 (acq_rel success
; order); failure order is downgraded to l2=2 (acquire).
%1 = atomicrmw min i32* %a, i32 %b acq_rel
ret i32 %1
}
define i32 @atomicrmw_min_i32_seq_cst(i32 *%a, i32 %b) nounwind {
; CSKY-LABEL: atomicrmw_min_i32_seq_cst:
; CSKY:       # %bb.0:
; CSKY-NEXT:    subi16 sp, sp, 16
; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT:    subi16 sp, sp, 8
; CSKY-NEXT:    mov16 l0, a1
; CSKY-NEXT:    mov16 l1, a0
; CSKY-NEXT:    ld16.w a0, (a0, 0)
; CSKY-NEXT:    movi16 l2, 5
; CSKY-NEXT:  .LBB154_1: # %atomicrmw.start
; CSKY-NEXT:    # =>This Inner Loop Header: Depth=1
; CSKY-NEXT:    cmplt16 l0, a0
; CSKY-NEXT:    mov16 a2, l0
; CSKY-NEXT:    movf32 a2, a0
; CSKY-NEXT:    st16.w a0, (sp, 4)
; CSKY-NEXT:    st16.w l2, (sp, 0)
; CSKY-NEXT:    mov16 a0, l1
; CSKY-NEXT:    addi16 a1, sp, 4
; CSKY-NEXT:    movi16 a3, 5
; CSKY-NEXT:    jsri32 [.LCPI154_0]
; CSKY-NEXT:    mov16 a1, a0
; CSKY-NEXT:    ld16.w a0, (sp, 4)
; CSKY-NEXT:    bez32 a1, .LBB154_1
; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
; CSKY-NEXT:    addi16 sp, sp, 8
; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT:    addi16 sp, sp, 16
; CSKY-NEXT:    rts16
; CSKY-NEXT:    .p2align 1
; CSKY-NEXT:  # %bb.3:
; CSKY-NEXT:    .p2align 2
; CSKY-NEXT:  .LCPI154_0:
; CSKY-NEXT:    .long __atomic_compare_exchange_4
; Signed min CAS loop via __atomic_compare_exchange_4: a3=5 and l2=5 (seq_cst
; for both success and failure orders).
%1 = atomicrmw min i32* %a, i32 %b seq_cst
ret i32 %1
}
define i32 @atomicrmw_umax_i32_monotonic(i32 *%a, i32 %b) nounwind {
; CSKY-LABEL: atomicrmw_umax_i32_monotonic:
; CSKY:       # %bb.0:
; CSKY-NEXT:    subi16 sp, sp, 16
; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT:    subi16 sp, sp, 8
; CSKY-NEXT:    mov16 l0, a1
; CSKY-NEXT:    mov16 l1, a0
; CSKY-NEXT:    ld16.w a0, (a0, 0)
; CSKY-NEXT:    movi16 l2, 0
; CSKY-NEXT:  .LBB155_1: # %atomicrmw.start
; CSKY-NEXT:    # =>This Inner Loop Header: Depth=1
; CSKY-NEXT:    cmphs16 l0, a0
; CSKY-NEXT:    mov16 a2, l0
; CSKY-NEXT:    movf32 a2, a0
; CSKY-NEXT:    st16.w a0, (sp, 4)
; CSKY-NEXT:    st16.w l2, (sp, 0)
; CSKY-NEXT:    mov16 a0, l1
; CSKY-NEXT:    addi16 a1, sp, 4
; CSKY-NEXT:    movi16 a3, 0
; CSKY-NEXT:    jsri32 [.LCPI155_0]
; CSKY-NEXT:    mov16 a1, a0
; CSKY-NEXT:    ld16.w a0, (sp, 4)
; CSKY-NEXT:    bez32 a1, .LBB155_1
; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
; CSKY-NEXT:    addi16 sp, sp, 8
; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT:    addi16 sp, sp, 16
; CSKY-NEXT:    rts16
; CSKY-NEXT:    .p2align 1
; CSKY-NEXT:  # %bb.3:
; CSKY-NEXT:    .p2align 2
; CSKY-NEXT:  .LCPI155_0:
; CSKY-NEXT:    .long __atomic_compare_exchange_4
; Unsigned max CAS loop: cmphs16 (unsigned higher-or-same) + movf32 select the
; larger value; __atomic_compare_exchange_4 with a3=0 and l2=0 (monotonic).
%1 = atomicrmw umax i32* %a, i32 %b monotonic
ret i32 %1
}
define i32 @atomicrmw_umax_i32_acquire(i32 *%a, i32 %b) nounwind {
; CSKY-LABEL: atomicrmw_umax_i32_acquire:
; CSKY:       # %bb.0:
; CSKY-NEXT:    subi16 sp, sp, 16
; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT:    subi16 sp, sp, 8
; CSKY-NEXT:    mov16 l0, a1
; CSKY-NEXT:    mov16 l1, a0
; CSKY-NEXT:    ld16.w a0, (a0, 0)
; CSKY-NEXT:    movi16 l2, 2
; CSKY-NEXT:  .LBB156_1: # %atomicrmw.start
; CSKY-NEXT:    # =>This Inner Loop Header: Depth=1
; CSKY-NEXT:    cmphs16 l0, a0
; CSKY-NEXT:    mov16 a2, l0
; CSKY-NEXT:    movf32 a2, a0
; CSKY-NEXT:    st16.w a0, (sp, 4)
; CSKY-NEXT:    st16.w l2, (sp, 0)
; CSKY-NEXT:    mov16 a0, l1
; CSKY-NEXT:    addi16 a1, sp, 4
; CSKY-NEXT:    movi16 a3, 2
; CSKY-NEXT:    jsri32 [.LCPI156_0]
; CSKY-NEXT:    mov16 a1, a0
; CSKY-NEXT:    ld16.w a0, (sp, 4)
; CSKY-NEXT:    bez32 a1, .LBB156_1
; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
; CSKY-NEXT:    addi16 sp, sp, 8
; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT:    addi16 sp, sp, 16
; CSKY-NEXT:    rts16
; CSKY-NEXT:    .p2align 1
; CSKY-NEXT:  # %bb.3:
; CSKY-NEXT:    .p2align 2
; CSKY-NEXT:  .LCPI156_0:
; CSKY-NEXT:    .long __atomic_compare_exchange_4
; Unsigned max CAS loop (cmphs16+movf32) via __atomic_compare_exchange_4:
; a3=2 and l2=2 (acquire success/failure orders).
%1 = atomicrmw umax i32* %a, i32 %b acquire
ret i32 %1
}
define i32 @atomicrmw_umax_i32_release(i32 *%a, i32 %b) nounwind {
; CSKY-LABEL: atomicrmw_umax_i32_release:
; CSKY:       # %bb.0:
; CSKY-NEXT:    subi16 sp, sp, 16
; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT:    subi16 sp, sp, 8
; CSKY-NEXT:    mov16 l0, a1
; CSKY-NEXT:    mov16 l1, a0
; CSKY-NEXT:    ld16.w a0, (a0, 0)
; CSKY-NEXT:    movi16 l2, 0
; CSKY-NEXT:  .LBB157_1: # %atomicrmw.start
; CSKY-NEXT:    # =>This Inner Loop Header: Depth=1
; CSKY-NEXT:    cmphs16 l0, a0
; CSKY-NEXT:    mov16 a2, l0
; CSKY-NEXT:    movf32 a2, a0
; CSKY-NEXT:    st16.w a0, (sp, 4)
; CSKY-NEXT:    st16.w l2, (sp, 0)
; CSKY-NEXT:    mov16 a0, l1
; CSKY-NEXT:    addi16 a1, sp, 4
; CSKY-NEXT:    movi16 a3, 3
; CSKY-NEXT:    jsri32 [.LCPI157_0]
; CSKY-NEXT:    mov16 a1, a0
; CSKY-NEXT:    ld16.w a0, (sp, 4)
; CSKY-NEXT:    bez32 a1, .LBB157_1
; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
; CSKY-NEXT:    addi16 sp, sp, 8
; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT:    addi16 sp, sp, 16
; CSKY-NEXT:    rts16
; CSKY-NEXT:    .p2align 1
; CSKY-NEXT:  # %bb.3:
; CSKY-NEXT:    .p2align 2
; CSKY-NEXT:  .LCPI157_0:
; CSKY-NEXT:    .long __atomic_compare_exchange_4
; Unsigned max CAS loop via __atomic_compare_exchange_4: a3=3 (release success
; order); failure order is l2=0 (monotonic), since a CAS failure cannot release.
%1 = atomicrmw umax i32* %a, i32 %b release
ret i32 %1
}
define i32 @atomicrmw_umax_i32_acq_rel(i32 *%a, i32 %b) nounwind {
; CSKY-LABEL: atomicrmw_umax_i32_acq_rel:
; CSKY:       # %bb.0:
; CSKY-NEXT:    subi16 sp, sp, 16
; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT:    subi16 sp, sp, 8
; CSKY-NEXT:    mov16 l0, a1
; CSKY-NEXT:    mov16 l1, a0
; CSKY-NEXT:    ld16.w a0, (a0, 0)
; CSKY-NEXT:    movi16 l2, 2
; CSKY-NEXT:  .LBB158_1: # %atomicrmw.start
; CSKY-NEXT:    # =>This Inner Loop Header: Depth=1
; CSKY-NEXT:    cmphs16 l0, a0
; CSKY-NEXT:    mov16 a2, l0
; CSKY-NEXT:    movf32 a2, a0
; CSKY-NEXT:    st16.w a0, (sp, 4)
; CSKY-NEXT:    st16.w l2, (sp, 0)
; CSKY-NEXT:    mov16 a0, l1
; CSKY-NEXT:    addi16 a1, sp, 4
; CSKY-NEXT:    movi16 a3, 4
; CSKY-NEXT:    jsri32 [.LCPI158_0]
; CSKY-NEXT:    mov16 a1, a0
; CSKY-NEXT:    ld16.w a0, (sp, 4)
; CSKY-NEXT:    bez32 a1, .LBB158_1
; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
; CSKY-NEXT:    addi16 sp, sp, 8
; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT:    addi16 sp, sp, 16
; CSKY-NEXT:    rts16
; CSKY-NEXT:    .p2align 1
; CSKY-NEXT:  # %bb.3:
; CSKY-NEXT:    .p2align 2
; CSKY-NEXT:  .LCPI158_0:
; CSKY-NEXT:    .long __atomic_compare_exchange_4
; Unsigned max CAS loop via __atomic_compare_exchange_4: a3=4 (acq_rel success
; order); failure order is downgraded to l2=2 (acquire).
%1 = atomicrmw umax i32* %a, i32 %b acq_rel
ret i32 %1
}
define i32 @atomicrmw_umax_i32_seq_cst(i32 *%a, i32 %b) nounwind {
; CSKY-LABEL: atomicrmw_umax_i32_seq_cst:
; CSKY:       # %bb.0:
; CSKY-NEXT:    subi16 sp, sp, 16
; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT:    subi16 sp, sp, 8
; CSKY-NEXT:    mov16 l0, a1
; CSKY-NEXT:    mov16 l1, a0
; CSKY-NEXT:    ld16.w a0, (a0, 0)
; CSKY-NEXT:    movi16 l2, 5
; CSKY-NEXT:  .LBB159_1: # %atomicrmw.start
; CSKY-NEXT:    # =>This Inner Loop Header: Depth=1
; CSKY-NEXT:    cmphs16 l0, a0
; CSKY-NEXT:    mov16 a2, l0
; CSKY-NEXT:    movf32 a2, a0
; CSKY-NEXT:    st16.w a0, (sp, 4)
; CSKY-NEXT:    st16.w l2, (sp, 0)
; CSKY-NEXT:    mov16 a0, l1
; CSKY-NEXT:    addi16 a1, sp, 4
; CSKY-NEXT:    movi16 a3, 5
; CSKY-NEXT:    jsri32 [.LCPI159_0]
; CSKY-NEXT:    mov16 a1, a0
; CSKY-NEXT:    ld16.w a0, (sp, 4)
; CSKY-NEXT:    bez32 a1, .LBB159_1
; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
; CSKY-NEXT:    addi16 sp, sp, 8
; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT:    addi16 sp, sp, 16
; CSKY-NEXT:    rts16
; CSKY-NEXT:    .p2align 1
; CSKY-NEXT:  # %bb.3:
; CSKY-NEXT:    .p2align 2
; CSKY-NEXT:  .LCPI159_0:
; CSKY-NEXT:    .long __atomic_compare_exchange_4
; Unsigned max CAS loop via __atomic_compare_exchange_4: a3=5 and l2=5
; (seq_cst for both success and failure orders).
%1 = atomicrmw umax i32* %a, i32 %b seq_cst
ret i32 %1
}
define i32 @atomicrmw_umin_i32_monotonic(i32 *%a, i32 %b) nounwind {
; CSKY-LABEL: atomicrmw_umin_i32_monotonic:
; CSKY:       # %bb.0:
; CSKY-NEXT:    subi16 sp, sp, 16
; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT:    subi16 sp, sp, 8
; CSKY-NEXT:    mov16 l0, a1
; CSKY-NEXT:    mov16 l1, a0
; CSKY-NEXT:    ld16.w a0, (a0, 0)
; CSKY-NEXT:    movi16 l2, 0
; CSKY-NEXT:  .LBB160_1: # %atomicrmw.start
; CSKY-NEXT:    # =>This Inner Loop Header: Depth=1
; CSKY-NEXT:    cmphs16 l0, a0
; CSKY-NEXT:    mov16 a2, l0
; CSKY-NEXT:    movt32 a2, a0
; CSKY-NEXT:    st16.w a0, (sp, 4)
; CSKY-NEXT:    st16.w l2, (sp, 0)
; CSKY-NEXT:    mov16 a0, l1
; CSKY-NEXT:    addi16 a1, sp, 4
; CSKY-NEXT:    movi16 a3, 0
; CSKY-NEXT:    jsri32 [.LCPI160_0]
; CSKY-NEXT:    mov16 a1, a0
; CSKY-NEXT:    ld16.w a0, (sp, 4)
; CSKY-NEXT:    bez32 a1, .LBB160_1
; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
; CSKY-NEXT:    addi16 sp, sp, 8
; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT:    addi16 sp, sp, 16
; CSKY-NEXT:    rts16
; CSKY-NEXT:    .p2align 1
; CSKY-NEXT:  # %bb.3:
; CSKY-NEXT:    .p2align 2
; CSKY-NEXT:  .LCPI160_0:
; CSKY-NEXT:    .long __atomic_compare_exchange_4
; Unsigned min CAS loop: cmphs16 + movt32 (move-if-true) select the smaller
; value; __atomic_compare_exchange_4 with a3=0 and l2=0 (monotonic).
%1 = atomicrmw umin i32* %a, i32 %b monotonic
ret i32 %1
}
define i32 @atomicrmw_umin_i32_acquire(i32 *%a, i32 %b) nounwind {
; CSKY-LABEL: atomicrmw_umin_i32_acquire:
; CSKY:       # %bb.0:
; CSKY-NEXT:    subi16 sp, sp, 16
; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT:    subi16 sp, sp, 8
; CSKY-NEXT:    mov16 l0, a1
; CSKY-NEXT:    mov16 l1, a0
; CSKY-NEXT:    ld16.w a0, (a0, 0)
; CSKY-NEXT:    movi16 l2, 2
; CSKY-NEXT:  .LBB161_1: # %atomicrmw.start
; CSKY-NEXT:    # =>This Inner Loop Header: Depth=1
; CSKY-NEXT:    cmphs16 l0, a0
; CSKY-NEXT:    mov16 a2, l0
; CSKY-NEXT:    movt32 a2, a0
; CSKY-NEXT:    st16.w a0, (sp, 4)
; CSKY-NEXT:    st16.w l2, (sp, 0)
; CSKY-NEXT:    mov16 a0, l1
; CSKY-NEXT:    addi16 a1, sp, 4
; CSKY-NEXT:    movi16 a3, 2
; CSKY-NEXT:    jsri32 [.LCPI161_0]
; CSKY-NEXT:    mov16 a1, a0
; CSKY-NEXT:    ld16.w a0, (sp, 4)
; CSKY-NEXT:    bez32 a1, .LBB161_1
; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
; CSKY-NEXT:    addi16 sp, sp, 8
; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT:    addi16 sp, sp, 16
; CSKY-NEXT:    rts16
; CSKY-NEXT:    .p2align 1
; CSKY-NEXT:  # %bb.3:
; CSKY-NEXT:    .p2align 2
; CSKY-NEXT:  .LCPI161_0:
; CSKY-NEXT:    .long __atomic_compare_exchange_4
; Unsigned min CAS loop (cmphs16+movt32) via __atomic_compare_exchange_4:
; a3=2 and l2=2 (acquire success/failure orders).
%1 = atomicrmw umin i32* %a, i32 %b acquire
ret i32 %1
}
define i32 @atomicrmw_umin_i32_release(i32 *%a, i32 %b) nounwind {
; CSKY-LABEL: atomicrmw_umin_i32_release:
; CSKY:       # %bb.0:
; CSKY-NEXT:    subi16 sp, sp, 16
; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT:    subi16 sp, sp, 8
; CSKY-NEXT:    mov16 l0, a1
; CSKY-NEXT:    mov16 l1, a0
; CSKY-NEXT:    ld16.w a0, (a0, 0)
; CSKY-NEXT:    movi16 l2, 0
; CSKY-NEXT:  .LBB162_1: # %atomicrmw.start
; CSKY-NEXT:    # =>This Inner Loop Header: Depth=1
; CSKY-NEXT:    cmphs16 l0, a0
; CSKY-NEXT:    mov16 a2, l0
; CSKY-NEXT:    movt32 a2, a0
; CSKY-NEXT:    st16.w a0, (sp, 4)
; CSKY-NEXT:    st16.w l2, (sp, 0)
; CSKY-NEXT:    mov16 a0, l1
; CSKY-NEXT:    addi16 a1, sp, 4
; CSKY-NEXT:    movi16 a3, 3
; CSKY-NEXT:    jsri32 [.LCPI162_0]
; CSKY-NEXT:    mov16 a1, a0
; CSKY-NEXT:    ld16.w a0, (sp, 4)
; CSKY-NEXT:    bez32 a1, .LBB162_1
; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
; CSKY-NEXT:    addi16 sp, sp, 8
; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT:    addi16 sp, sp, 16
; CSKY-NEXT:    rts16
; CSKY-NEXT:    .p2align 1
; CSKY-NEXT:  # %bb.3:
; CSKY-NEXT:    .p2align 2
; CSKY-NEXT:  .LCPI162_0:
; CSKY-NEXT:    .long __atomic_compare_exchange_4
; Unsigned min CAS loop via __atomic_compare_exchange_4: a3=3 (release success
; order); failure order is l2=0 (monotonic), since a CAS failure cannot release.
%1 = atomicrmw umin i32* %a, i32 %b release
ret i32 %1
}
define i32 @atomicrmw_umin_i32_acq_rel(i32 *%a, i32 %b) nounwind {
; CSKY-LABEL: atomicrmw_umin_i32_acq_rel:
; CSKY:       # %bb.0:
; CSKY-NEXT:    subi16 sp, sp, 16
; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT:    subi16 sp, sp, 8
; CSKY-NEXT:    mov16 l0, a1
; CSKY-NEXT:    mov16 l1, a0
; CSKY-NEXT:    ld16.w a0, (a0, 0)
; CSKY-NEXT:    movi16 l2, 2
; CSKY-NEXT:  .LBB163_1: # %atomicrmw.start
; CSKY-NEXT:    # =>This Inner Loop Header: Depth=1
; CSKY-NEXT:    cmphs16 l0, a0
; CSKY-NEXT:    mov16 a2, l0
; CSKY-NEXT:    movt32 a2, a0
; CSKY-NEXT:    st16.w a0, (sp, 4)
; CSKY-NEXT:    st16.w l2, (sp, 0)
; CSKY-NEXT:    mov16 a0, l1
; CSKY-NEXT:    addi16 a1, sp, 4
; CSKY-NEXT:    movi16 a3, 4
; CSKY-NEXT:    jsri32 [.LCPI163_0]
; CSKY-NEXT:    mov16 a1, a0
; CSKY-NEXT:    ld16.w a0, (sp, 4)
; CSKY-NEXT:    bez32 a1, .LBB163_1
; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
; CSKY-NEXT:    addi16 sp, sp, 8
; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT:    addi16 sp, sp, 16
; CSKY-NEXT:    rts16
; CSKY-NEXT:    .p2align 1
; CSKY-NEXT:  # %bb.3:
; CSKY-NEXT:    .p2align 2
; CSKY-NEXT:  .LCPI163_0:
; CSKY-NEXT:    .long __atomic_compare_exchange_4
; Unsigned min CAS loop via __atomic_compare_exchange_4: a3=4 (acq_rel success
; order); failure order is downgraded to l2=2 (acquire).
%1 = atomicrmw umin i32* %a, i32 %b acq_rel
ret i32 %1
}
define i32 @atomicrmw_umin_i32_seq_cst(i32 *%a, i32 %b) nounwind {
; CSKY-LABEL: atomicrmw_umin_i32_seq_cst:
; CSKY:       # %bb.0:
; CSKY-NEXT:    subi16 sp, sp, 16
; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT:    subi16 sp, sp, 8
; CSKY-NEXT:    mov16 l0, a1
; CSKY-NEXT:    mov16 l1, a0
; CSKY-NEXT:    ld16.w a0, (a0, 0)
; CSKY-NEXT:    movi16 l2, 5
; CSKY-NEXT:  .LBB164_1: # %atomicrmw.start
; CSKY-NEXT:    # =>This Inner Loop Header: Depth=1
; CSKY-NEXT:    cmphs16 l0, a0
; CSKY-NEXT:    mov16 a2, l0
; CSKY-NEXT:    movt32 a2, a0
; CSKY-NEXT:    st16.w a0, (sp, 4)
; CSKY-NEXT:    st16.w l2, (sp, 0)
; CSKY-NEXT:    mov16 a0, l1
; CSKY-NEXT:    addi16 a1, sp, 4
; CSKY-NEXT:    movi16 a3, 5
; CSKY-NEXT:    jsri32 [.LCPI164_0]
; CSKY-NEXT:    mov16 a1, a0
; CSKY-NEXT:    ld16.w a0, (sp, 4)
; CSKY-NEXT:    bez32 a1, .LBB164_1
; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
; CSKY-NEXT:    addi16 sp, sp, 8
; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT:    addi16 sp, sp, 16
; CSKY-NEXT:    rts16
; CSKY-NEXT:    .p2align 1
; CSKY-NEXT:  # %bb.3:
; CSKY-NEXT:    .p2align 2
; CSKY-NEXT:  .LCPI164_0:
; CSKY-NEXT:    .long __atomic_compare_exchange_4
; Unsigned min CAS loop via __atomic_compare_exchange_4: a3=5 and l2=5
; (seq_cst for both success and failure orders).
%1 = atomicrmw umin i32* %a, i32 %b seq_cst
ret i32 %1
}
define i64 @atomicrmw_xchg_i64_monotonic(i64* %a, i64 %b) nounwind {
; CSKY-LABEL: atomicrmw_xchg_i64_monotonic:
; CSKY:       # %bb.0:
; CSKY-NEXT:    subi16 sp, sp, 4
; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT:    movi16 a3, 0
; CSKY-NEXT:    jsri32 [.LCPI165_0]
; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT:    addi16 sp, sp, 4
; CSKY-NEXT:    rts16
; CSKY-NEXT:    .p2align 1
; CSKY-NEXT:  # %bb.1:
; CSKY-NEXT:    .p2align 2
; CSKY-NEXT:  .LCPI165_0:
; CSKY-NEXT:    .long __atomic_exchange_8
; i64 xchg lowers to a single __atomic_exchange_8 libcall; a3 carries the
; memory-order argument (0 = monotonic/relaxed).
%1 = atomicrmw xchg i64* %a, i64 %b monotonic
ret i64 %1
}
define i64 @atomicrmw_xchg_i64_acquire(i64* %a, i64 %b) nounwind {
; CSKY-LABEL: atomicrmw_xchg_i64_acquire:
; CSKY:       # %bb.0:
; CSKY-NEXT:    subi16 sp, sp, 4
; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT:    movi16 a3, 2
; CSKY-NEXT:    jsri32 [.LCPI166_0]
; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT:    addi16 sp, sp, 4
; CSKY-NEXT:    rts16
; CSKY-NEXT:    .p2align 1
; CSKY-NEXT:  # %bb.1:
; CSKY-NEXT:    .p2align 2
; CSKY-NEXT:  .LCPI166_0:
; CSKY-NEXT:    .long __atomic_exchange_8
; i64 xchg lowers to a single __atomic_exchange_8 libcall; a3 carries the
; memory-order argument (2 = acquire).
%1 = atomicrmw xchg i64* %a, i64 %b acquire
ret i64 %1
}
define i64 @atomicrmw_xchg_i64_release(i64* %a, i64 %b) nounwind {
; CSKY-LABEL: atomicrmw_xchg_i64_release:
; CSKY:       # %bb.0:
; CSKY-NEXT:    subi16 sp, sp, 4
; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT:    movi16 a3, 3
; CSKY-NEXT:    jsri32 [.LCPI167_0]
; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT:    addi16 sp, sp, 4
; CSKY-NEXT:    rts16
; CSKY-NEXT:    .p2align 1
; CSKY-NEXT:  # %bb.1:
; CSKY-NEXT:    .p2align 2
; CSKY-NEXT:  .LCPI167_0:
; CSKY-NEXT:    .long __atomic_exchange_8
; i64 xchg lowers to a single __atomic_exchange_8 libcall; a3 carries the
; memory-order argument (3 = release).
%1 = atomicrmw xchg i64* %a, i64 %b release
ret i64 %1
}
define i64 @atomicrmw_xchg_i64_acq_rel(i64* %a, i64 %b) nounwind {
; CSKY-LABEL: atomicrmw_xchg_i64_acq_rel:
; CSKY:       # %bb.0:
; CSKY-NEXT:    subi16 sp, sp, 4
; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT:    movi16 a3, 4
; CSKY-NEXT:    jsri32 [.LCPI168_0]
; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT:    addi16 sp, sp, 4
; CSKY-NEXT:    rts16
; CSKY-NEXT:    .p2align 1
; CSKY-NEXT:  # %bb.1:
; CSKY-NEXT:    .p2align 2
; CSKY-NEXT:  .LCPI168_0:
; CSKY-NEXT:    .long __atomic_exchange_8
; i64 xchg lowers to a single __atomic_exchange_8 libcall; a3 carries the
; memory-order argument (4 = acq_rel).
%1 = atomicrmw xchg i64* %a, i64 %b acq_rel
ret i64 %1
}
define i64 @atomicrmw_xchg_i64_seq_cst(i64* %a, i64 %b) nounwind {
; CSKY-LABEL: atomicrmw_xchg_i64_seq_cst:
; CSKY:       # %bb.0:
; CSKY-NEXT:    subi16 sp, sp, 4
; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT:    movi16 a3, 5
; CSKY-NEXT:    jsri32 [.LCPI169_0]
; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT:    addi16 sp, sp, 4
; CSKY-NEXT:    rts16
; CSKY-NEXT:    .p2align 1
; CSKY-NEXT:  # %bb.1:
; CSKY-NEXT:    .p2align 2
; CSKY-NEXT:  .LCPI169_0:
; CSKY-NEXT:    .long __atomic_exchange_8
; i64 xchg lowers to a single __atomic_exchange_8 libcall; a3 carries the
; memory-order argument (5 = seq_cst).
%1 = atomicrmw xchg i64* %a, i64 %b seq_cst
ret i64 %1
}
define i64 @atomicrmw_add_i64_monotonic(i64 *%a, i64 %b) nounwind {
; CSKY-LABEL: atomicrmw_add_i64_monotonic:
; CSKY:       # %bb.0:
; CSKY-NEXT:    subi16 sp, sp, 4
; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT:    movi16 a3, 0
; CSKY-NEXT:    jsri32 [.LCPI170_0]
; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT:    addi16 sp, sp, 4
; CSKY-NEXT:    rts16
; CSKY-NEXT:    .p2align 1
; CSKY-NEXT:  # %bb.1:
; CSKY-NEXT:    .p2align 2
; CSKY-NEXT:  .LCPI170_0:
; CSKY-NEXT:    .long __atomic_fetch_add_8
; i64 add lowers to a single __atomic_fetch_add_8 libcall; a3 carries the
; memory-order argument (0 = monotonic/relaxed).
%1 = atomicrmw add i64* %a, i64 %b monotonic
ret i64 %1
}
define i64 @atomicrmw_add_i64_acquire(i64 *%a, i64 %b) nounwind {
; CSKY-LABEL: atomicrmw_add_i64_acquire:
; CSKY:       # %bb.0:
; CSKY-NEXT:    subi16 sp, sp, 4
; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT:    movi16 a3, 2
; CSKY-NEXT:    jsri32 [.LCPI171_0]
; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT:    addi16 sp, sp, 4
; CSKY-NEXT:    rts16
; CSKY-NEXT:    .p2align 1
; CSKY-NEXT:  # %bb.1:
; CSKY-NEXT:    .p2align 2
; CSKY-NEXT:  .LCPI171_0:
; CSKY-NEXT:    .long __atomic_fetch_add_8
; i64 add lowers to a single __atomic_fetch_add_8 libcall; a3 carries the
; memory-order argument (2 = acquire).
%1 = atomicrmw add i64* %a, i64 %b acquire
ret i64 %1
}
define i64 @atomicrmw_add_i64_release(i64 *%a, i64 %b) nounwind {
; CSKY-LABEL: atomicrmw_add_i64_release:
; CSKY:       # %bb.0:
; CSKY-NEXT:    subi16 sp, sp, 4
; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT:    movi16 a3, 3
; CSKY-NEXT:    jsri32 [.LCPI172_0]
; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT:    addi16 sp, sp, 4
; CSKY-NEXT:    rts16
; CSKY-NEXT:    .p2align 1
; CSKY-NEXT:  # %bb.1:
; CSKY-NEXT:    .p2align 2
; CSKY-NEXT:  .LCPI172_0:
; CSKY-NEXT:    .long __atomic_fetch_add_8
; i64 add lowers to a single __atomic_fetch_add_8 libcall; a3 carries the
; memory-order argument (3 = release).
%1 = atomicrmw add i64* %a, i64 %b release
ret i64 %1
}
define i64 @atomicrmw_add_i64_acq_rel(i64 *%a, i64 %b) nounwind {
; CSKY-LABEL: atomicrmw_add_i64_acq_rel:
; CSKY:       # %bb.0:
; CSKY-NEXT:    subi16 sp, sp, 4
; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT:    movi16 a3, 4
; CSKY-NEXT:    jsri32 [.LCPI173_0]
; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT:    addi16 sp, sp, 4
; CSKY-NEXT:    rts16
; CSKY-NEXT:    .p2align 1
; CSKY-NEXT:  # %bb.1:
; CSKY-NEXT:    .p2align 2
; CSKY-NEXT:  .LCPI173_0:
; CSKY-NEXT:    .long __atomic_fetch_add_8
; i64 add lowers to a single __atomic_fetch_add_8 libcall; a3 carries the
; memory-order argument (4 = acq_rel).
%1 = atomicrmw add i64* %a, i64 %b acq_rel
ret i64 %1
}
define i64 @atomicrmw_add_i64_seq_cst(i64 *%a, i64 %b) nounwind {
; CSKY-LABEL: atomicrmw_add_i64_seq_cst:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a3, 5
; CSKY-NEXT: jsri32 [.LCPI174_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI174_0:
; CSKY-NEXT: .long __atomic_fetch_add_8
%1 = atomicrmw add i64* %a, i64 %b seq_cst
ret i64 %1
}
; i64 atomicrmw sub: lowers to a __atomic_fetch_sub_8 libcall for every
; ordering; only the memory-order immediate in a3 differs
; (0=monotonic, 2=acquire, 3=release, 4=acq_rel, 5=seq_cst).
define i64 @atomicrmw_sub_i64_monotonic(i64* %a, i64 %b) nounwind {
; CSKY-LABEL: atomicrmw_sub_i64_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a3, 0
; CSKY-NEXT: jsri32 [.LCPI175_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI175_0:
; CSKY-NEXT: .long __atomic_fetch_sub_8
%1 = atomicrmw sub i64* %a, i64 %b monotonic
ret i64 %1
}
define i64 @atomicrmw_sub_i64_acquire(i64* %a, i64 %b) nounwind {
; CSKY-LABEL: atomicrmw_sub_i64_acquire:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a3, 2
; CSKY-NEXT: jsri32 [.LCPI176_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI176_0:
; CSKY-NEXT: .long __atomic_fetch_sub_8
%1 = atomicrmw sub i64* %a, i64 %b acquire
ret i64 %1
}
define i64 @atomicrmw_sub_i64_release(i64* %a, i64 %b) nounwind {
; CSKY-LABEL: atomicrmw_sub_i64_release:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a3, 3
; CSKY-NEXT: jsri32 [.LCPI177_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI177_0:
; CSKY-NEXT: .long __atomic_fetch_sub_8
%1 = atomicrmw sub i64* %a, i64 %b release
ret i64 %1
}
define i64 @atomicrmw_sub_i64_acq_rel(i64* %a, i64 %b) nounwind {
; CSKY-LABEL: atomicrmw_sub_i64_acq_rel:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a3, 4
; CSKY-NEXT: jsri32 [.LCPI178_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI178_0:
; CSKY-NEXT: .long __atomic_fetch_sub_8
%1 = atomicrmw sub i64* %a, i64 %b acq_rel
ret i64 %1
}
define i64 @atomicrmw_sub_i64_seq_cst(i64* %a, i64 %b) nounwind {
; CSKY-LABEL: atomicrmw_sub_i64_seq_cst:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a3, 5
; CSKY-NEXT: jsri32 [.LCPI179_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI179_0:
; CSKY-NEXT: .long __atomic_fetch_sub_8
%1 = atomicrmw sub i64* %a, i64 %b seq_cst
ret i64 %1
}
; i64 atomicrmw and: lowers to a __atomic_fetch_and_8 libcall for every
; ordering; only the memory-order immediate in a3 differs
; (0=monotonic, 2=acquire, 3=release, 4=acq_rel, 5=seq_cst).
define i64 @atomicrmw_and_i64_monotonic(i64 *%a, i64 %b) nounwind {
; CSKY-LABEL: atomicrmw_and_i64_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a3, 0
; CSKY-NEXT: jsri32 [.LCPI180_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI180_0:
; CSKY-NEXT: .long __atomic_fetch_and_8
%1 = atomicrmw and i64* %a, i64 %b monotonic
ret i64 %1
}
define i64 @atomicrmw_and_i64_acquire(i64 *%a, i64 %b) nounwind {
; CSKY-LABEL: atomicrmw_and_i64_acquire:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a3, 2
; CSKY-NEXT: jsri32 [.LCPI181_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI181_0:
; CSKY-NEXT: .long __atomic_fetch_and_8
%1 = atomicrmw and i64* %a, i64 %b acquire
ret i64 %1
}
define i64 @atomicrmw_and_i64_release(i64 *%a, i64 %b) nounwind {
; CSKY-LABEL: atomicrmw_and_i64_release:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a3, 3
; CSKY-NEXT: jsri32 [.LCPI182_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI182_0:
; CSKY-NEXT: .long __atomic_fetch_and_8
%1 = atomicrmw and i64* %a, i64 %b release
ret i64 %1
}
define i64 @atomicrmw_and_i64_acq_rel(i64 *%a, i64 %b) nounwind {
; CSKY-LABEL: atomicrmw_and_i64_acq_rel:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a3, 4
; CSKY-NEXT: jsri32 [.LCPI183_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI183_0:
; CSKY-NEXT: .long __atomic_fetch_and_8
%1 = atomicrmw and i64* %a, i64 %b acq_rel
ret i64 %1
}
define i64 @atomicrmw_and_i64_seq_cst(i64 *%a, i64 %b) nounwind {
; CSKY-LABEL: atomicrmw_and_i64_seq_cst:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a3, 5
; CSKY-NEXT: jsri32 [.LCPI184_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI184_0:
; CSKY-NEXT: .long __atomic_fetch_and_8
%1 = atomicrmw and i64* %a, i64 %b seq_cst
ret i64 %1
}
; i64 atomicrmw nand: lowers to a __atomic_fetch_nand_8 libcall for every
; ordering; only the memory-order immediate in a3 differs
; (0=monotonic, 2=acquire, 3=release, 4=acq_rel, 5=seq_cst).
define i64 @atomicrmw_nand_i64_monotonic(i64* %a, i64 %b) nounwind {
; CSKY-LABEL: atomicrmw_nand_i64_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a3, 0
; CSKY-NEXT: jsri32 [.LCPI185_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI185_0:
; CSKY-NEXT: .long __atomic_fetch_nand_8
%1 = atomicrmw nand i64* %a, i64 %b monotonic
ret i64 %1
}
define i64 @atomicrmw_nand_i64_acquire(i64* %a, i64 %b) nounwind {
; CSKY-LABEL: atomicrmw_nand_i64_acquire:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a3, 2
; CSKY-NEXT: jsri32 [.LCPI186_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI186_0:
; CSKY-NEXT: .long __atomic_fetch_nand_8
%1 = atomicrmw nand i64* %a, i64 %b acquire
ret i64 %1
}
define i64 @atomicrmw_nand_i64_release(i64* %a, i64 %b) nounwind {
; CSKY-LABEL: atomicrmw_nand_i64_release:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a3, 3
; CSKY-NEXT: jsri32 [.LCPI187_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI187_0:
; CSKY-NEXT: .long __atomic_fetch_nand_8
%1 = atomicrmw nand i64* %a, i64 %b release
ret i64 %1
}
define i64 @atomicrmw_nand_i64_acq_rel(i64* %a, i64 %b) nounwind {
; CSKY-LABEL: atomicrmw_nand_i64_acq_rel:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a3, 4
; CSKY-NEXT: jsri32 [.LCPI188_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI188_0:
; CSKY-NEXT: .long __atomic_fetch_nand_8
%1 = atomicrmw nand i64* %a, i64 %b acq_rel
ret i64 %1
}
define i64 @atomicrmw_nand_i64_seq_cst(i64* %a, i64 %b) nounwind {
; CSKY-LABEL: atomicrmw_nand_i64_seq_cst:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a3, 5
; CSKY-NEXT: jsri32 [.LCPI189_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI189_0:
; CSKY-NEXT: .long __atomic_fetch_nand_8
%1 = atomicrmw nand i64* %a, i64 %b seq_cst
ret i64 %1
}
; i64 atomicrmw or: lowers to a __atomic_fetch_or_8 libcall for every
; ordering; only the memory-order immediate in a3 differs
; (0=monotonic, 2=acquire, 3=release, 4=acq_rel, 5=seq_cst).
define i64 @atomicrmw_or_i64_monotonic(i64 *%a, i64 %b) nounwind {
; CSKY-LABEL: atomicrmw_or_i64_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a3, 0
; CSKY-NEXT: jsri32 [.LCPI190_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI190_0:
; CSKY-NEXT: .long __atomic_fetch_or_8
%1 = atomicrmw or i64* %a, i64 %b monotonic
ret i64 %1
}
define i64 @atomicrmw_or_i64_acquire(i64 *%a, i64 %b) nounwind {
; CSKY-LABEL: atomicrmw_or_i64_acquire:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a3, 2
; CSKY-NEXT: jsri32 [.LCPI191_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI191_0:
; CSKY-NEXT: .long __atomic_fetch_or_8
%1 = atomicrmw or i64* %a, i64 %b acquire
ret i64 %1
}
define i64 @atomicrmw_or_i64_release(i64 *%a, i64 %b) nounwind {
; CSKY-LABEL: atomicrmw_or_i64_release:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a3, 3
; CSKY-NEXT: jsri32 [.LCPI192_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI192_0:
; CSKY-NEXT: .long __atomic_fetch_or_8
%1 = atomicrmw or i64* %a, i64 %b release
ret i64 %1
}
define i64 @atomicrmw_or_i64_acq_rel(i64 *%a, i64 %b) nounwind {
; CSKY-LABEL: atomicrmw_or_i64_acq_rel:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a3, 4
; CSKY-NEXT: jsri32 [.LCPI193_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI193_0:
; CSKY-NEXT: .long __atomic_fetch_or_8
%1 = atomicrmw or i64* %a, i64 %b acq_rel
ret i64 %1
}
define i64 @atomicrmw_or_i64_seq_cst(i64 *%a, i64 %b) nounwind {
; CSKY-LABEL: atomicrmw_or_i64_seq_cst:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a3, 5
; CSKY-NEXT: jsri32 [.LCPI194_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI194_0:
; CSKY-NEXT: .long __atomic_fetch_or_8
%1 = atomicrmw or i64* %a, i64 %b seq_cst
ret i64 %1
}
; i64 atomicrmw xor: lowers to a __atomic_fetch_xor_8 libcall for every
; ordering; only the memory-order immediate in a3 differs
; (0=monotonic, 2=acquire, 3=release, 4=acq_rel, 5=seq_cst).
define i64 @atomicrmw_xor_i64_monotonic(i64 *%a, i64 %b) nounwind {
; CSKY-LABEL: atomicrmw_xor_i64_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a3, 0
; CSKY-NEXT: jsri32 [.LCPI195_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI195_0:
; CSKY-NEXT: .long __atomic_fetch_xor_8
%1 = atomicrmw xor i64* %a, i64 %b monotonic
ret i64 %1
}
define i64 @atomicrmw_xor_i64_acquire(i64 *%a, i64 %b) nounwind {
; CSKY-LABEL: atomicrmw_xor_i64_acquire:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a3, 2
; CSKY-NEXT: jsri32 [.LCPI196_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI196_0:
; CSKY-NEXT: .long __atomic_fetch_xor_8
%1 = atomicrmw xor i64* %a, i64 %b acquire
ret i64 %1
}
define i64 @atomicrmw_xor_i64_release(i64 *%a, i64 %b) nounwind {
; CSKY-LABEL: atomicrmw_xor_i64_release:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a3, 3
; CSKY-NEXT: jsri32 [.LCPI197_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI197_0:
; CSKY-NEXT: .long __atomic_fetch_xor_8
%1 = atomicrmw xor i64* %a, i64 %b release
ret i64 %1
}
define i64 @atomicrmw_xor_i64_acq_rel(i64 *%a, i64 %b) nounwind {
; CSKY-LABEL: atomicrmw_xor_i64_acq_rel:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a3, 4
; CSKY-NEXT: jsri32 [.LCPI198_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI198_0:
; CSKY-NEXT: .long __atomic_fetch_xor_8
%1 = atomicrmw xor i64* %a, i64 %b acq_rel
ret i64 %1
}
define i64 @atomicrmw_xor_i64_seq_cst(i64 *%a, i64 %b) nounwind {
; CSKY-LABEL: atomicrmw_xor_i64_seq_cst:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: movi16 a3, 5
; CSKY-NEXT: jsri32 [.LCPI199_0]
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI199_0:
; CSKY-NEXT: .long __atomic_fetch_xor_8
%1 = atomicrmw xor i64* %a, i64 %b seq_cst
ret i64 %1
}
; i64 atomicrmw max has no fetch-and-op libcall, so it is expanded into a
; compare-exchange loop: load the current value, compute the 64-bit signed max
; of it and %b using paired cmplt/cmphs/cmpne compares plus conditional moves
; (high word decides unless the high words are equal, then the low-word
; unsigned compare decides), then call __atomic_compare_exchange_8 and retry
; until it reports success. The movi into l3 (and l4 where present) supplies
; the memory-order arguments; NOTE(review): where two orders are stored
; (release/acq_rel), they appear to be the success and failure orders of the
; compare-exchange -- confirm against the libatomic signature.
define i64 @atomicrmw_max_i64_monotonic(i64 *%a, i64 %b) nounwind {
; CSKY-LABEL: atomicrmw_max_i64_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 20
; CSKY-NEXT: st16.w l3, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 28
; CSKY-NEXT: mov16 l0, a2
; CSKY-NEXT: mov16 l1, a1
; CSKY-NEXT: mov16 l2, a0
; CSKY-NEXT: ld16.w a1, (a0, 4)
; CSKY-NEXT: ld16.w a0, (a0, 0)
; CSKY-NEXT: movi16 l3, 0
; CSKY-NEXT: .LBB200_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: cmplt16 l0, a1
; CSKY-NEXT: mvc32 a2
; CSKY-NEXT: st16.w a2, (sp, 12)
; CSKY-NEXT: cmpne16 a1, l0
; CSKY-NEXT: mvc32 a2
; CSKY-NEXT: st16.w a2, (sp, 16)
; CSKY-NEXT: cmphs16 l1, a0
; CSKY-NEXT: mvcv16 a2
; CSKY-NEXT: ld16.w a3, (sp, 12)
; CSKY-NEXT: btsti16 a3, 0
; CSKY-NEXT: mvc32 a3
; CSKY-NEXT: ld32.w t0, (sp, 16)
; CSKY-NEXT: btsti32 t0, 0
; CSKY-NEXT: movf32 a3, a2
; CSKY-NEXT: btsti16 a3, 0
; CSKY-NEXT: mov16 a2, l1
; CSKY-NEXT: movt32 a2, a0
; CSKY-NEXT: mov16 a3, l0
; CSKY-NEXT: movt32 a3, a1
; CSKY-NEXT: st16.w a0, (sp, 20)
; CSKY-NEXT: st16.w a1, (sp, 24)
; CSKY-NEXT: st16.w l3, (sp, 4)
; CSKY-NEXT: st16.w l3, (sp, 0)
; CSKY-NEXT: mov16 a0, l2
; CSKY-NEXT: addi16 a1, sp, 20
; CSKY-NEXT: jsri32 [.LCPI200_0]
; CSKY-NEXT: mov16 a2, a0
; CSKY-NEXT: ld16.w a1, (sp, 24)
; CSKY-NEXT: ld16.w a0, (sp, 20)
; CSKY-NEXT: bez32 a2, .LBB200_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 28
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 20
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI200_0:
; CSKY-NEXT: .long __atomic_compare_exchange_8
%1 = atomicrmw max i64* %a, i64 %b monotonic
ret i64 %1
}
; acquire: a single order value 2 is stored for both order slots.
define i64 @atomicrmw_max_i64_acquire(i64 *%a, i64 %b) nounwind {
; CSKY-LABEL: atomicrmw_max_i64_acquire:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 20
; CSKY-NEXT: st16.w l3, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 28
; CSKY-NEXT: mov16 l0, a2
; CSKY-NEXT: mov16 l1, a1
; CSKY-NEXT: mov16 l2, a0
; CSKY-NEXT: ld16.w a1, (a0, 4)
; CSKY-NEXT: ld16.w a0, (a0, 0)
; CSKY-NEXT: movi16 l3, 2
; CSKY-NEXT: .LBB201_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: cmplt16 l0, a1
; CSKY-NEXT: mvc32 a2
; CSKY-NEXT: st16.w a2, (sp, 12)
; CSKY-NEXT: cmpne16 a1, l0
; CSKY-NEXT: mvc32 a2
; CSKY-NEXT: st16.w a2, (sp, 16)
; CSKY-NEXT: cmphs16 l1, a0
; CSKY-NEXT: mvcv16 a2
; CSKY-NEXT: ld16.w a3, (sp, 12)
; CSKY-NEXT: btsti16 a3, 0
; CSKY-NEXT: mvc32 a3
; CSKY-NEXT: ld32.w t0, (sp, 16)
; CSKY-NEXT: btsti32 t0, 0
; CSKY-NEXT: movf32 a3, a2
; CSKY-NEXT: btsti16 a3, 0
; CSKY-NEXT: mov16 a2, l1
; CSKY-NEXT: movt32 a2, a0
; CSKY-NEXT: mov16 a3, l0
; CSKY-NEXT: movt32 a3, a1
; CSKY-NEXT: st16.w a0, (sp, 20)
; CSKY-NEXT: st16.w a1, (sp, 24)
; CSKY-NEXT: st16.w l3, (sp, 4)
; CSKY-NEXT: st16.w l3, (sp, 0)
; CSKY-NEXT: mov16 a0, l2
; CSKY-NEXT: addi16 a1, sp, 20
; CSKY-NEXT: jsri32 [.LCPI201_0]
; CSKY-NEXT: mov16 a2, a0
; CSKY-NEXT: ld16.w a1, (sp, 24)
; CSKY-NEXT: ld16.w a0, (sp, 20)
; CSKY-NEXT: bez32 a2, .LBB201_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 28
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 20
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI201_0:
; CSKY-NEXT: .long __atomic_compare_exchange_8
%1 = atomicrmw max i64* %a, i64 %b acquire
ret i64 %1
}
; release: two distinct order values are needed (l3=0, l4=3), so an extra
; callee-saved register l4 is spilled and the frame grows to 24 bytes.
define i64 @atomicrmw_max_i64_release(i64 *%a, i64 %b) nounwind {
; CSKY-LABEL: atomicrmw_max_i64_release:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 24
; CSKY-NEXT: st16.w l3, (sp, 20) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w l4, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 28
; CSKY-NEXT: mov16 l0, a2
; CSKY-NEXT: mov16 l1, a1
; CSKY-NEXT: mov16 l2, a0
; CSKY-NEXT: ld16.w a1, (a0, 4)
; CSKY-NEXT: ld16.w a0, (a0, 0)
; CSKY-NEXT: movi16 l3, 0
; CSKY-NEXT: movi32 l4, 3
; CSKY-NEXT: .LBB202_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: cmplt16 l0, a1
; CSKY-NEXT: mvc32 a2
; CSKY-NEXT: st16.w a2, (sp, 12)
; CSKY-NEXT: cmpne16 a1, l0
; CSKY-NEXT: mvc32 a2
; CSKY-NEXT: st16.w a2, (sp, 16)
; CSKY-NEXT: cmphs16 l1, a0
; CSKY-NEXT: mvcv16 a2
; CSKY-NEXT: ld16.w a3, (sp, 12)
; CSKY-NEXT: btsti16 a3, 0
; CSKY-NEXT: mvc32 a3
; CSKY-NEXT: ld32.w t0, (sp, 16)
; CSKY-NEXT: btsti32 t0, 0
; CSKY-NEXT: movf32 a3, a2
; CSKY-NEXT: btsti16 a3, 0
; CSKY-NEXT: mov16 a2, l1
; CSKY-NEXT: movt32 a2, a0
; CSKY-NEXT: mov16 a3, l0
; CSKY-NEXT: movt32 a3, a1
; CSKY-NEXT: st16.w a0, (sp, 20)
; CSKY-NEXT: st16.w a1, (sp, 24)
; CSKY-NEXT: st16.w l3, (sp, 4)
; CSKY-NEXT: st32.w l4, (sp, 0)
; CSKY-NEXT: mov16 a0, l2
; CSKY-NEXT: addi16 a1, sp, 20
; CSKY-NEXT: jsri32 [.LCPI202_0]
; CSKY-NEXT: mov16 a2, a0
; CSKY-NEXT: ld16.w a1, (sp, 24)
; CSKY-NEXT: ld16.w a0, (sp, 20)
; CSKY-NEXT: bez32 a2, .LBB202_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 28
; CSKY-NEXT: ld32.w l4, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld32.w lr, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 20) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 24
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI202_0:
; CSKY-NEXT: .long __atomic_compare_exchange_8
%1 = atomicrmw max i64* %a, i64 %b release
ret i64 %1
}
; acq_rel: two distinct order values (l3=2, l4=4), same l4-spilling frame
; shape as the release variant.
define i64 @atomicrmw_max_i64_acq_rel(i64 *%a, i64 %b) nounwind {
; CSKY-LABEL: atomicrmw_max_i64_acq_rel:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 24
; CSKY-NEXT: st16.w l3, (sp, 20) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w l4, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 28
; CSKY-NEXT: mov16 l0, a2
; CSKY-NEXT: mov16 l1, a1
; CSKY-NEXT: mov16 l2, a0
; CSKY-NEXT: ld16.w a1, (a0, 4)
; CSKY-NEXT: ld16.w a0, (a0, 0)
; CSKY-NEXT: movi16 l3, 2
; CSKY-NEXT: movi32 l4, 4
; CSKY-NEXT: .LBB203_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: cmplt16 l0, a1
; CSKY-NEXT: mvc32 a2
; CSKY-NEXT: st16.w a2, (sp, 12)
; CSKY-NEXT: cmpne16 a1, l0
; CSKY-NEXT: mvc32 a2
; CSKY-NEXT: st16.w a2, (sp, 16)
; CSKY-NEXT: cmphs16 l1, a0
; CSKY-NEXT: mvcv16 a2
; CSKY-NEXT: ld16.w a3, (sp, 12)
; CSKY-NEXT: btsti16 a3, 0
; CSKY-NEXT: mvc32 a3
; CSKY-NEXT: ld32.w t0, (sp, 16)
; CSKY-NEXT: btsti32 t0, 0
; CSKY-NEXT: movf32 a3, a2
; CSKY-NEXT: btsti16 a3, 0
; CSKY-NEXT: mov16 a2, l1
; CSKY-NEXT: movt32 a2, a0
; CSKY-NEXT: mov16 a3, l0
; CSKY-NEXT: movt32 a3, a1
; CSKY-NEXT: st16.w a0, (sp, 20)
; CSKY-NEXT: st16.w a1, (sp, 24)
; CSKY-NEXT: st16.w l3, (sp, 4)
; CSKY-NEXT: st32.w l4, (sp, 0)
; CSKY-NEXT: mov16 a0, l2
; CSKY-NEXT: addi16 a1, sp, 20
; CSKY-NEXT: jsri32 [.LCPI203_0]
; CSKY-NEXT: mov16 a2, a0
; CSKY-NEXT: ld16.w a1, (sp, 24)
; CSKY-NEXT: ld16.w a0, (sp, 20)
; CSKY-NEXT: bez32 a2, .LBB203_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 28
; CSKY-NEXT: ld32.w l4, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld32.w lr, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 20) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 24
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI203_0:
; CSKY-NEXT: .long __atomic_compare_exchange_8
%1 = atomicrmw max i64* %a, i64 %b acq_rel
ret i64 %1
}
; seq_cst: a single order value 5 suffices again, so no l4 spill.
define i64 @atomicrmw_max_i64_seq_cst(i64 *%a, i64 %b) nounwind {
; CSKY-LABEL: atomicrmw_max_i64_seq_cst:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 20
; CSKY-NEXT: st16.w l3, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 28
; CSKY-NEXT: mov16 l0, a2
; CSKY-NEXT: mov16 l1, a1
; CSKY-NEXT: mov16 l2, a0
; CSKY-NEXT: ld16.w a1, (a0, 4)
; CSKY-NEXT: ld16.w a0, (a0, 0)
; CSKY-NEXT: movi16 l3, 5
; CSKY-NEXT: .LBB204_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: cmplt16 l0, a1
; CSKY-NEXT: mvc32 a2
; CSKY-NEXT: st16.w a2, (sp, 12)
; CSKY-NEXT: cmpne16 a1, l0
; CSKY-NEXT: mvc32 a2
; CSKY-NEXT: st16.w a2, (sp, 16)
; CSKY-NEXT: cmphs16 l1, a0
; CSKY-NEXT: mvcv16 a2
; CSKY-NEXT: ld16.w a3, (sp, 12)
; CSKY-NEXT: btsti16 a3, 0
; CSKY-NEXT: mvc32 a3
; CSKY-NEXT: ld32.w t0, (sp, 16)
; CSKY-NEXT: btsti32 t0, 0
; CSKY-NEXT: movf32 a3, a2
; CSKY-NEXT: btsti16 a3, 0
; CSKY-NEXT: mov16 a2, l1
; CSKY-NEXT: movt32 a2, a0
; CSKY-NEXT: mov16 a3, l0
; CSKY-NEXT: movt32 a3, a1
; CSKY-NEXT: st16.w a0, (sp, 20)
; CSKY-NEXT: st16.w a1, (sp, 24)
; CSKY-NEXT: st16.w l3, (sp, 4)
; CSKY-NEXT: st16.w l3, (sp, 0)
; CSKY-NEXT: mov16 a0, l2
; CSKY-NEXT: addi16 a1, sp, 20
; CSKY-NEXT: jsri32 [.LCPI204_0]
; CSKY-NEXT: mov16 a2, a0
; CSKY-NEXT: ld16.w a1, (sp, 24)
; CSKY-NEXT: ld16.w a0, (sp, 20)
; CSKY-NEXT: bez32 a2, .LBB204_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 28
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 20
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI204_0:
; CSKY-NEXT: .long __atomic_compare_exchange_8
%1 = atomicrmw max i64* %a, i64 %b seq_cst
ret i64 %1
}
; i64 atomicrmw min: same compare-exchange-loop expansion as max, but with the
; comparison sense flipped (cmphs/cmplt roles and the movf32 operand order are
; swapped relative to the max variants) so the loop selects the signed minimum
; before each __atomic_compare_exchange_8 call. The movi into l3 (and l4 in
; the release variant) supplies the memory-order arguments.
define i64 @atomicrmw_min_i64_monotonic(i64 *%a, i64 %b) nounwind {
; CSKY-LABEL: atomicrmw_min_i64_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 20
; CSKY-NEXT: st16.w l3, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 28
; CSKY-NEXT: mov16 l0, a2
; CSKY-NEXT: mov16 l1, a1
; CSKY-NEXT: mov16 l2, a0
; CSKY-NEXT: ld16.w a1, (a0, 4)
; CSKY-NEXT: ld16.w a0, (a0, 0)
; CSKY-NEXT: movi16 l3, 0
; CSKY-NEXT: .LBB205_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: cmphs16 l1, a0
; CSKY-NEXT: mvc32 a2
; CSKY-NEXT: st16.w a2, (sp, 12)
; CSKY-NEXT: cmpne16 a1, l0
; CSKY-NEXT: mvc32 a2
; CSKY-NEXT: st16.w a2, (sp, 16)
; CSKY-NEXT: cmplt16 l0, a1
; CSKY-NEXT: mvcv16 a2
; CSKY-NEXT: ld16.w a3, (sp, 12)
; CSKY-NEXT: btsti16 a3, 0
; CSKY-NEXT: mvc32 a3
; CSKY-NEXT: ld32.w t0, (sp, 16)
; CSKY-NEXT: btsti32 t0, 0
; CSKY-NEXT: movf32 a2, a3
; CSKY-NEXT: btsti16 a2, 0
; CSKY-NEXT: mov16 a2, l1
; CSKY-NEXT: movt32 a2, a0
; CSKY-NEXT: mov16 a3, l0
; CSKY-NEXT: movt32 a3, a1
; CSKY-NEXT: st16.w a0, (sp, 20)
; CSKY-NEXT: st16.w a1, (sp, 24)
; CSKY-NEXT: st16.w l3, (sp, 4)
; CSKY-NEXT: st16.w l3, (sp, 0)
; CSKY-NEXT: mov16 a0, l2
; CSKY-NEXT: addi16 a1, sp, 20
; CSKY-NEXT: jsri32 [.LCPI205_0]
; CSKY-NEXT: mov16 a2, a0
; CSKY-NEXT: ld16.w a1, (sp, 24)
; CSKY-NEXT: ld16.w a0, (sp, 20)
; CSKY-NEXT: bez32 a2, .LBB205_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 28
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 20
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI205_0:
; CSKY-NEXT: .long __atomic_compare_exchange_8
%1 = atomicrmw min i64* %a, i64 %b monotonic
ret i64 %1
}
; acquire: single order value 2 stored for both order slots.
define i64 @atomicrmw_min_i64_acquire(i64 *%a, i64 %b) nounwind {
; CSKY-LABEL: atomicrmw_min_i64_acquire:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 20
; CSKY-NEXT: st16.w l3, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 28
; CSKY-NEXT: mov16 l0, a2
; CSKY-NEXT: mov16 l1, a1
; CSKY-NEXT: mov16 l2, a0
; CSKY-NEXT: ld16.w a1, (a0, 4)
; CSKY-NEXT: ld16.w a0, (a0, 0)
; CSKY-NEXT: movi16 l3, 2
; CSKY-NEXT: .LBB206_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: cmphs16 l1, a0
; CSKY-NEXT: mvc32 a2
; CSKY-NEXT: st16.w a2, (sp, 12)
; CSKY-NEXT: cmpne16 a1, l0
; CSKY-NEXT: mvc32 a2
; CSKY-NEXT: st16.w a2, (sp, 16)
; CSKY-NEXT: cmplt16 l0, a1
; CSKY-NEXT: mvcv16 a2
; CSKY-NEXT: ld16.w a3, (sp, 12)
; CSKY-NEXT: btsti16 a3, 0
; CSKY-NEXT: mvc32 a3
; CSKY-NEXT: ld32.w t0, (sp, 16)
; CSKY-NEXT: btsti32 t0, 0
; CSKY-NEXT: movf32 a2, a3
; CSKY-NEXT: btsti16 a2, 0
; CSKY-NEXT: mov16 a2, l1
; CSKY-NEXT: movt32 a2, a0
; CSKY-NEXT: mov16 a3, l0
; CSKY-NEXT: movt32 a3, a1
; CSKY-NEXT: st16.w a0, (sp, 20)
; CSKY-NEXT: st16.w a1, (sp, 24)
; CSKY-NEXT: st16.w l3, (sp, 4)
; CSKY-NEXT: st16.w l3, (sp, 0)
; CSKY-NEXT: mov16 a0, l2
; CSKY-NEXT: addi16 a1, sp, 20
; CSKY-NEXT: jsri32 [.LCPI206_0]
; CSKY-NEXT: mov16 a2, a0
; CSKY-NEXT: ld16.w a1, (sp, 24)
; CSKY-NEXT: ld16.w a0, (sp, 20)
; CSKY-NEXT: bez32 a2, .LBB206_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 28
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 20
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI206_0:
; CSKY-NEXT: .long __atomic_compare_exchange_8
%1 = atomicrmw min i64* %a, i64 %b acquire
ret i64 %1
}
; release: two distinct order values (l3=0, l4=3), so l4 is spilled and the
; frame grows to 24 bytes, mirroring atomicrmw_max_i64_release.
define i64 @atomicrmw_min_i64_release(i64 *%a, i64 %b) nounwind {
; CSKY-LABEL: atomicrmw_min_i64_release:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 24
; CSKY-NEXT: st16.w l3, (sp, 20) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w l4, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 28
; CSKY-NEXT: mov16 l0, a2
; CSKY-NEXT: mov16 l1, a1
; CSKY-NEXT: mov16 l2, a0
; CSKY-NEXT: ld16.w a1, (a0, 4)
; CSKY-NEXT: ld16.w a0, (a0, 0)
; CSKY-NEXT: movi16 l3, 0
; CSKY-NEXT: movi32 l4, 3
; CSKY-NEXT: .LBB207_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: cmphs16 l1, a0
; CSKY-NEXT: mvc32 a2
; CSKY-NEXT: st16.w a2, (sp, 12)
; CSKY-NEXT: cmpne16 a1, l0
; CSKY-NEXT: mvc32 a2
; CSKY-NEXT: st16.w a2, (sp, 16)
; CSKY-NEXT: cmplt16 l0, a1
; CSKY-NEXT: mvcv16 a2
; CSKY-NEXT: ld16.w a3, (sp, 12)
; CSKY-NEXT: btsti16 a3, 0
; CSKY-NEXT: mvc32 a3
; CSKY-NEXT: ld32.w t0, (sp, 16)
; CSKY-NEXT: btsti32 t0, 0
; CSKY-NEXT: movf32 a2, a3
; CSKY-NEXT: btsti16 a2, 0
; CSKY-NEXT: mov16 a2, l1
; CSKY-NEXT: movt32 a2, a0
; CSKY-NEXT: mov16 a3, l0
; CSKY-NEXT: movt32 a3, a1
; CSKY-NEXT: st16.w a0, (sp, 20)
; CSKY-NEXT: st16.w a1, (sp, 24)
; CSKY-NEXT: st16.w l3, (sp, 4)
; CSKY-NEXT: st32.w l4, (sp, 0)
; CSKY-NEXT: mov16 a0, l2
; CSKY-NEXT: addi16 a1, sp, 20
; CSKY-NEXT: jsri32 [.LCPI207_0]
; CSKY-NEXT: mov16 a2, a0
; CSKY-NEXT: ld16.w a1, (sp, 24)
; CSKY-NEXT: ld16.w a0, (sp, 20)
; CSKY-NEXT: bez32 a2, .LBB207_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 28
; CSKY-NEXT: ld32.w l4, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld32.w lr, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 20) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 24
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI207_0:
; CSKY-NEXT: .long __atomic_compare_exchange_8
%1 = atomicrmw min i64* %a, i64 %b release
ret i64 %1
}
; atomicrmw min i64, acq_rel ordering. Same cmpxchg-loop lowering via the
; __atomic_compare_exchange_8 libcall; stack-passed orderings are
; l4 = 4 (acq_rel, success) at (sp, 0) and l3 = 2 (acquire, failure) at (sp, 4).
; NOTE: CHECK lines are autogenerated by update_llc_test_checks.py — do not
; edit them by hand; regenerate instead.
define i64 @atomicrmw_min_i64_acq_rel(i64 *%a, i64 %b) nounwind {
; CSKY-LABEL: atomicrmw_min_i64_acq_rel:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 24
; CSKY-NEXT: st16.w l3, (sp, 20) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w l4, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 28
; CSKY-NEXT: mov16 l0, a2
; CSKY-NEXT: mov16 l1, a1
; CSKY-NEXT: mov16 l2, a0
; CSKY-NEXT: ld16.w a1, (a0, 4)
; CSKY-NEXT: ld16.w a0, (a0, 0)
; CSKY-NEXT: movi16 l3, 2
; CSKY-NEXT: movi32 l4, 4
; CSKY-NEXT: .LBB208_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: cmphs16 l1, a0
; CSKY-NEXT: mvc32 a2
; CSKY-NEXT: st16.w a2, (sp, 12)
; CSKY-NEXT: cmpne16 a1, l0
; CSKY-NEXT: mvc32 a2
; CSKY-NEXT: st16.w a2, (sp, 16)
; CSKY-NEXT: cmplt16 l0, a1
; CSKY-NEXT: mvcv16 a2
; CSKY-NEXT: ld16.w a3, (sp, 12)
; CSKY-NEXT: btsti16 a3, 0
; CSKY-NEXT: mvc32 a3
; CSKY-NEXT: ld32.w t0, (sp, 16)
; CSKY-NEXT: btsti32 t0, 0
; CSKY-NEXT: movf32 a2, a3
; CSKY-NEXT: btsti16 a2, 0
; CSKY-NEXT: mov16 a2, l1
; CSKY-NEXT: movt32 a2, a0
; CSKY-NEXT: mov16 a3, l0
; CSKY-NEXT: movt32 a3, a1
; CSKY-NEXT: st16.w a0, (sp, 20)
; CSKY-NEXT: st16.w a1, (sp, 24)
; CSKY-NEXT: st16.w l3, (sp, 4)
; CSKY-NEXT: st32.w l4, (sp, 0)
; CSKY-NEXT: mov16 a0, l2
; CSKY-NEXT: addi16 a1, sp, 20
; CSKY-NEXT: jsri32 [.LCPI208_0]
; CSKY-NEXT: mov16 a2, a0
; CSKY-NEXT: ld16.w a1, (sp, 24)
; CSKY-NEXT: ld16.w a0, (sp, 20)
; CSKY-NEXT: bez32 a2, .LBB208_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 28
; CSKY-NEXT: ld32.w l4, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld32.w lr, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 20) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 24
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI208_0:
; CSKY-NEXT: .long __atomic_compare_exchange_8
  %1 = atomicrmw min i64* %a, i64 %b acq_rel
  ret i64 %1
}
; atomicrmw min i64, seq_cst ordering. Cmpxchg-loop lowering via the
; __atomic_compare_exchange_8 libcall; l3 = 5 (seq_cst) is stored to both
; stack slots, so success and failure orderings are both seq_cst — hence
; only one order register is needed (no l4 spill here).
; NOTE: CHECK lines are autogenerated by update_llc_test_checks.py — do not
; edit them by hand; regenerate instead.
define i64 @atomicrmw_min_i64_seq_cst(i64 *%a, i64 %b) nounwind {
; CSKY-LABEL: atomicrmw_min_i64_seq_cst:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 20
; CSKY-NEXT: st16.w l3, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 28
; CSKY-NEXT: mov16 l0, a2
; CSKY-NEXT: mov16 l1, a1
; CSKY-NEXT: mov16 l2, a0
; CSKY-NEXT: ld16.w a1, (a0, 4)
; CSKY-NEXT: ld16.w a0, (a0, 0)
; CSKY-NEXT: movi16 l3, 5
; CSKY-NEXT: .LBB209_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: cmphs16 l1, a0
; CSKY-NEXT: mvc32 a2
; CSKY-NEXT: st16.w a2, (sp, 12)
; CSKY-NEXT: cmpne16 a1, l0
; CSKY-NEXT: mvc32 a2
; CSKY-NEXT: st16.w a2, (sp, 16)
; CSKY-NEXT: cmplt16 l0, a1
; CSKY-NEXT: mvcv16 a2
; CSKY-NEXT: ld16.w a3, (sp, 12)
; CSKY-NEXT: btsti16 a3, 0
; CSKY-NEXT: mvc32 a3
; CSKY-NEXT: ld32.w t0, (sp, 16)
; CSKY-NEXT: btsti32 t0, 0
; CSKY-NEXT: movf32 a2, a3
; CSKY-NEXT: btsti16 a2, 0
; CSKY-NEXT: mov16 a2, l1
; CSKY-NEXT: movt32 a2, a0
; CSKY-NEXT: mov16 a3, l0
; CSKY-NEXT: movt32 a3, a1
; CSKY-NEXT: st16.w a0, (sp, 20)
; CSKY-NEXT: st16.w a1, (sp, 24)
; CSKY-NEXT: st16.w l3, (sp, 4)
; CSKY-NEXT: st16.w l3, (sp, 0)
; CSKY-NEXT: mov16 a0, l2
; CSKY-NEXT: addi16 a1, sp, 20
; CSKY-NEXT: jsri32 [.LCPI209_0]
; CSKY-NEXT: mov16 a2, a0
; CSKY-NEXT: ld16.w a1, (sp, 24)
; CSKY-NEXT: ld16.w a0, (sp, 20)
; CSKY-NEXT: bez32 a2, .LBB209_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 28
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 20
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI209_0:
; CSKY-NEXT: .long __atomic_compare_exchange_8
  %1 = atomicrmw min i64* %a, i64 %b seq_cst
  ret i64 %1
}
; atomicrmw umax i64, monotonic ordering. Unsigned 64-bit max computed with a
; cmphs/cmpne condition cascade, then retried through the
; __atomic_compare_exchange_8 libcall until the exchange succeeds
; (l3 = 0, relaxed, used for both success and failure orderings).
; NOTE: CHECK lines are autogenerated by update_llc_test_checks.py — do not
; edit them by hand; regenerate instead.
define i64 @atomicrmw_umax_i64_monotonic(i64 *%a, i64 %b) nounwind {
; CSKY-LABEL: atomicrmw_umax_i64_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 20
; CSKY-NEXT: st16.w l3, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 24
; CSKY-NEXT: mov16 l0, a2
; CSKY-NEXT: mov16 l1, a1
; CSKY-NEXT: mov16 l2, a0
; CSKY-NEXT: ld16.w a1, (a0, 4)
; CSKY-NEXT: ld16.w a0, (a0, 0)
; CSKY-NEXT: movi16 l3, 0
; CSKY-NEXT: .LBB210_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: cmpne16 a1, l0
; CSKY-NEXT: mvc32 a2
; CSKY-NEXT: st16.w a2, (sp, 12)
; CSKY-NEXT: cmphs16 l1, a0
; CSKY-NEXT: mvcv16 a2
; CSKY-NEXT: cmphs16 l0, a1
; CSKY-NEXT: mvcv16 a3
; CSKY-NEXT: ld32.w t0, (sp, 12)
; CSKY-NEXT: btsti32 t0, 0
; CSKY-NEXT: movf32 a3, a2
; CSKY-NEXT: btsti16 a3, 0
; CSKY-NEXT: mov16 a2, l1
; CSKY-NEXT: movt32 a2, a0
; CSKY-NEXT: mov16 a3, l0
; CSKY-NEXT: movt32 a3, a1
; CSKY-NEXT: st16.w a0, (sp, 16)
; CSKY-NEXT: st16.w a1, (sp, 20)
; CSKY-NEXT: st16.w l3, (sp, 4)
; CSKY-NEXT: st16.w l3, (sp, 0)
; CSKY-NEXT: mov16 a0, l2
; CSKY-NEXT: addi16 a1, sp, 16
; CSKY-NEXT: jsri32 [.LCPI210_0]
; CSKY-NEXT: mov16 a2, a0
; CSKY-NEXT: ld16.w a1, (sp, 20)
; CSKY-NEXT: ld16.w a0, (sp, 16)
; CSKY-NEXT: bez32 a2, .LBB210_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 24
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 20
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.2:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI210_0:
; CSKY-NEXT: .long __atomic_compare_exchange_8
  %1 = atomicrmw umax i64* %a, i64 %b monotonic
  ret i64 %1
}
; atomicrmw umax i64, acquire ordering. Same unsigned-max cmpxchg loop as the
; monotonic variant, with l3 = 2 (acquire) used for both the success and
; failure orderings of __atomic_compare_exchange_8.
; NOTE: CHECK lines are autogenerated by update_llc_test_checks.py — do not
; edit them by hand; regenerate instead.
define i64 @atomicrmw_umax_i64_acquire(i64 *%a, i64 %b) nounwind {
; CSKY-LABEL: atomicrmw_umax_i64_acquire:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 20
; CSKY-NEXT: st16.w l3, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 24
; CSKY-NEXT: mov16 l0, a2
; CSKY-NEXT: mov16 l1, a1
; CSKY-NEXT: mov16 l2, a0
; CSKY-NEXT: ld16.w a1, (a0, 4)
; CSKY-NEXT: ld16.w a0, (a0, 0)
; CSKY-NEXT: movi16 l3, 2
; CSKY-NEXT: .LBB211_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: cmpne16 a1, l0
; CSKY-NEXT: mvc32 a2
; CSKY-NEXT: st16.w a2, (sp, 12)
; CSKY-NEXT: cmphs16 l1, a0
; CSKY-NEXT: mvcv16 a2
; CSKY-NEXT: cmphs16 l0, a1
; CSKY-NEXT: mvcv16 a3
; CSKY-NEXT: ld32.w t0, (sp, 12)
; CSKY-NEXT: btsti32 t0, 0
; CSKY-NEXT: movf32 a3, a2
; CSKY-NEXT: btsti16 a3, 0
; CSKY-NEXT: mov16 a2, l1
; CSKY-NEXT: movt32 a2, a0
; CSKY-NEXT: mov16 a3, l0
; CSKY-NEXT: movt32 a3, a1
; CSKY-NEXT: st16.w a0, (sp, 16)
; CSKY-NEXT: st16.w a1, (sp, 20)
; CSKY-NEXT: st16.w l3, (sp, 4)
; CSKY-NEXT: st16.w l3, (sp, 0)
; CSKY-NEXT: mov16 a0, l2
; CSKY-NEXT: addi16 a1, sp, 16
; CSKY-NEXT: jsri32 [.LCPI211_0]
; CSKY-NEXT: mov16 a2, a0
; CSKY-NEXT: ld16.w a1, (sp, 20)
; CSKY-NEXT: ld16.w a0, (sp, 16)
; CSKY-NEXT: bez32 a2, .LBB211_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 24
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 20
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI211_0:
; CSKY-NEXT: .long __atomic_compare_exchange_8
  %1 = atomicrmw umax i64* %a, i64 %b acquire
  ret i64 %1
}
; atomicrmw umax i64, release ordering. Unsigned-max cmpxchg loop; distinct
; success/failure orderings require two registers: l4 = 3 (release, success)
; stored at (sp, 0) and l3 = 0 (relaxed, failure) at (sp, 4).
; NOTE: CHECK lines are autogenerated by update_llc_test_checks.py — do not
; edit them by hand; regenerate instead.
define i64 @atomicrmw_umax_i64_release(i64 *%a, i64 %b) nounwind {
; CSKY-LABEL: atomicrmw_umax_i64_release:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 24
; CSKY-NEXT: st16.w l3, (sp, 20) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w l4, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 24
; CSKY-NEXT: mov16 l0, a2
; CSKY-NEXT: mov16 l1, a1
; CSKY-NEXT: mov16 l2, a0
; CSKY-NEXT: ld16.w a1, (a0, 4)
; CSKY-NEXT: ld16.w a0, (a0, 0)
; CSKY-NEXT: movi16 l3, 0
; CSKY-NEXT: movi32 l4, 3
; CSKY-NEXT: .LBB212_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: cmpne16 a1, l0
; CSKY-NEXT: mvc32 a2
; CSKY-NEXT: st16.w a2, (sp, 12)
; CSKY-NEXT: cmphs16 l1, a0
; CSKY-NEXT: mvcv16 a2
; CSKY-NEXT: cmphs16 l0, a1
; CSKY-NEXT: mvcv16 a3
; CSKY-NEXT: ld32.w t0, (sp, 12)
; CSKY-NEXT: btsti32 t0, 0
; CSKY-NEXT: movf32 a3, a2
; CSKY-NEXT: btsti16 a3, 0
; CSKY-NEXT: mov16 a2, l1
; CSKY-NEXT: movt32 a2, a0
; CSKY-NEXT: mov16 a3, l0
; CSKY-NEXT: movt32 a3, a1
; CSKY-NEXT: st16.w a0, (sp, 16)
; CSKY-NEXT: st16.w a1, (sp, 20)
; CSKY-NEXT: st16.w l3, (sp, 4)
; CSKY-NEXT: st32.w l4, (sp, 0)
; CSKY-NEXT: mov16 a0, l2
; CSKY-NEXT: addi16 a1, sp, 16
; CSKY-NEXT: jsri32 [.LCPI212_0]
; CSKY-NEXT: mov16 a2, a0
; CSKY-NEXT: ld16.w a1, (sp, 20)
; CSKY-NEXT: ld16.w a0, (sp, 16)
; CSKY-NEXT: bez32 a2, .LBB212_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 24
; CSKY-NEXT: ld32.w l4, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld32.w lr, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 20) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 24
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI212_0:
; CSKY-NEXT: .long __atomic_compare_exchange_8
  %1 = atomicrmw umax i64* %a, i64 %b release
  ret i64 %1
}
; atomicrmw umax i64, acq_rel ordering. Unsigned-max cmpxchg loop with
; l4 = 4 (acq_rel, success) at (sp, 0) and l3 = 2 (acquire, failure) at (sp, 4)
; passed to __atomic_compare_exchange_8 on the stack.
; NOTE: CHECK lines are autogenerated by update_llc_test_checks.py — do not
; edit them by hand; regenerate instead.
define i64 @atomicrmw_umax_i64_acq_rel(i64 *%a, i64 %b) nounwind {
; CSKY-LABEL: atomicrmw_umax_i64_acq_rel:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 24
; CSKY-NEXT: st16.w l3, (sp, 20) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w l4, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 24
; CSKY-NEXT: mov16 l0, a2
; CSKY-NEXT: mov16 l1, a1
; CSKY-NEXT: mov16 l2, a0
; CSKY-NEXT: ld16.w a1, (a0, 4)
; CSKY-NEXT: ld16.w a0, (a0, 0)
; CSKY-NEXT: movi16 l3, 2
; CSKY-NEXT: movi32 l4, 4
; CSKY-NEXT: .LBB213_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: cmpne16 a1, l0
; CSKY-NEXT: mvc32 a2
; CSKY-NEXT: st16.w a2, (sp, 12)
; CSKY-NEXT: cmphs16 l1, a0
; CSKY-NEXT: mvcv16 a2
; CSKY-NEXT: cmphs16 l0, a1
; CSKY-NEXT: mvcv16 a3
; CSKY-NEXT: ld32.w t0, (sp, 12)
; CSKY-NEXT: btsti32 t0, 0
; CSKY-NEXT: movf32 a3, a2
; CSKY-NEXT: btsti16 a3, 0
; CSKY-NEXT: mov16 a2, l1
; CSKY-NEXT: movt32 a2, a0
; CSKY-NEXT: mov16 a3, l0
; CSKY-NEXT: movt32 a3, a1
; CSKY-NEXT: st16.w a0, (sp, 16)
; CSKY-NEXT: st16.w a1, (sp, 20)
; CSKY-NEXT: st16.w l3, (sp, 4)
; CSKY-NEXT: st32.w l4, (sp, 0)
; CSKY-NEXT: mov16 a0, l2
; CSKY-NEXT: addi16 a1, sp, 16
; CSKY-NEXT: jsri32 [.LCPI213_0]
; CSKY-NEXT: mov16 a2, a0
; CSKY-NEXT: ld16.w a1, (sp, 20)
; CSKY-NEXT: ld16.w a0, (sp, 16)
; CSKY-NEXT: bez32 a2, .LBB213_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 24
; CSKY-NEXT: ld32.w l4, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld32.w lr, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 20) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 24
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI213_0:
; CSKY-NEXT: .long __atomic_compare_exchange_8
  %1 = atomicrmw umax i64* %a, i64 %b acq_rel
  ret i64 %1
}
; atomicrmw umax i64, seq_cst ordering. Unsigned-max cmpxchg loop; l3 = 5
; (seq_cst) is stored to both stack slots, so one order register covers both
; the success and failure orderings.
; NOTE: CHECK lines are autogenerated by update_llc_test_checks.py — do not
; edit them by hand; regenerate instead.
define i64 @atomicrmw_umax_i64_seq_cst(i64 *%a, i64 %b) nounwind {
; CSKY-LABEL: atomicrmw_umax_i64_seq_cst:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 20
; CSKY-NEXT: st16.w l3, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 24
; CSKY-NEXT: mov16 l0, a2
; CSKY-NEXT: mov16 l1, a1
; CSKY-NEXT: mov16 l2, a0
; CSKY-NEXT: ld16.w a1, (a0, 4)
; CSKY-NEXT: ld16.w a0, (a0, 0)
; CSKY-NEXT: movi16 l3, 5
; CSKY-NEXT: .LBB214_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: cmpne16 a1, l0
; CSKY-NEXT: mvc32 a2
; CSKY-NEXT: st16.w a2, (sp, 12)
; CSKY-NEXT: cmphs16 l1, a0
; CSKY-NEXT: mvcv16 a2
; CSKY-NEXT: cmphs16 l0, a1
; CSKY-NEXT: mvcv16 a3
; CSKY-NEXT: ld32.w t0, (sp, 12)
; CSKY-NEXT: btsti32 t0, 0
; CSKY-NEXT: movf32 a3, a2
; CSKY-NEXT: btsti16 a3, 0
; CSKY-NEXT: mov16 a2, l1
; CSKY-NEXT: movt32 a2, a0
; CSKY-NEXT: mov16 a3, l0
; CSKY-NEXT: movt32 a3, a1
; CSKY-NEXT: st16.w a0, (sp, 16)
; CSKY-NEXT: st16.w a1, (sp, 20)
; CSKY-NEXT: st16.w l3, (sp, 4)
; CSKY-NEXT: st16.w l3, (sp, 0)
; CSKY-NEXT: mov16 a0, l2
; CSKY-NEXT: addi16 a1, sp, 16
; CSKY-NEXT: jsri32 [.LCPI214_0]
; CSKY-NEXT: mov16 a2, a0
; CSKY-NEXT: ld16.w a1, (sp, 20)
; CSKY-NEXT: ld16.w a0, (sp, 16)
; CSKY-NEXT: bez32 a2, .LBB214_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 24
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 20
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI214_0:
; CSKY-NEXT: .long __atomic_compare_exchange_8
  %1 = atomicrmw umax i64* %a, i64 %b seq_cst
  ret i64 %1
}
; atomicrmw umin i64, monotonic ordering. Unsigned 64-bit min: three condition
; bits (high-word cmphs, low-word cmphs, high-word cmpne) are spilled and
; recombined to select old vs. new value, then retried through the
; __atomic_compare_exchange_8 libcall (l3 = 0, relaxed, for both orderings).
; NOTE: CHECK lines are autogenerated by update_llc_test_checks.py — do not
; edit them by hand; regenerate instead.
define i64 @atomicrmw_umin_i64_monotonic(i64 *%a, i64 %b) nounwind {
; CSKY-LABEL: atomicrmw_umin_i64_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 20
; CSKY-NEXT: st16.w l3, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 32
; CSKY-NEXT: mov16 l0, a2
; CSKY-NEXT: mov16 l1, a1
; CSKY-NEXT: mov16 l2, a0
; CSKY-NEXT: ld16.w a1, (a0, 4)
; CSKY-NEXT: ld16.w a0, (a0, 0)
; CSKY-NEXT: movi16 l3, 0
; CSKY-NEXT: .LBB215_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: cmphs16 l1, a0
; CSKY-NEXT: mvc32 a2
; CSKY-NEXT: st16.w a2, (sp, 16)
; CSKY-NEXT: cmphs16 l0, a1
; CSKY-NEXT: mvc32 a2
; CSKY-NEXT: st16.w a2, (sp, 12)
; CSKY-NEXT: cmpne16 a1, l0
; CSKY-NEXT: mvc32 a2
; CSKY-NEXT: st16.w a2, (sp, 20)
; CSKY-NEXT: ld16.w a2, (sp, 16)
; CSKY-NEXT: btsti16 a2, 0
; CSKY-NEXT: mvc32 a2
; CSKY-NEXT: ld16.w a3, (sp, 12)
; CSKY-NEXT: btsti16 a3, 0
; CSKY-NEXT: mvc32 a3
; CSKY-NEXT: ld32.w t0, (sp, 20)
; CSKY-NEXT: btsti32 t0, 0
; CSKY-NEXT: movf32 a3, a2
; CSKY-NEXT: btsti16 a3, 0
; CSKY-NEXT: mov16 a2, l1
; CSKY-NEXT: movt32 a2, a0
; CSKY-NEXT: mov16 a3, l0
; CSKY-NEXT: movt32 a3, a1
; CSKY-NEXT: st16.w a0, (sp, 24)
; CSKY-NEXT: st16.w a1, (sp, 28)
; CSKY-NEXT: st16.w l3, (sp, 4)
; CSKY-NEXT: st16.w l3, (sp, 0)
; CSKY-NEXT: mov16 a0, l2
; CSKY-NEXT: addi16 a1, sp, 24
; CSKY-NEXT: jsri32 [.LCPI215_0]
; CSKY-NEXT: mov16 a2, a0
; CSKY-NEXT: ld16.w a1, (sp, 28)
; CSKY-NEXT: ld16.w a0, (sp, 24)
; CSKY-NEXT: bez32 a2, .LBB215_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 32
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 20
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI215_0:
; CSKY-NEXT: .long __atomic_compare_exchange_8
  %1 = atomicrmw umin i64* %a, i64 %b monotonic
  ret i64 %1
}
; atomicrmw umin i64, acquire ordering. Same unsigned-min cmpxchg loop as the
; monotonic variant; l3 = 2 (acquire) used for both success and failure
; orderings of __atomic_compare_exchange_8.
; NOTE: CHECK lines are autogenerated by update_llc_test_checks.py — do not
; edit them by hand; regenerate instead.
define i64 @atomicrmw_umin_i64_acquire(i64 *%a, i64 %b) nounwind {
; CSKY-LABEL: atomicrmw_umin_i64_acquire:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 20
; CSKY-NEXT: st16.w l3, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 32
; CSKY-NEXT: mov16 l0, a2
; CSKY-NEXT: mov16 l1, a1
; CSKY-NEXT: mov16 l2, a0
; CSKY-NEXT: ld16.w a1, (a0, 4)
; CSKY-NEXT: ld16.w a0, (a0, 0)
; CSKY-NEXT: movi16 l3, 2
; CSKY-NEXT: .LBB216_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: cmphs16 l1, a0
; CSKY-NEXT: mvc32 a2
; CSKY-NEXT: st16.w a2, (sp, 16)
; CSKY-NEXT: cmphs16 l0, a1
; CSKY-NEXT: mvc32 a2
; CSKY-NEXT: st16.w a2, (sp, 12)
; CSKY-NEXT: cmpne16 a1, l0
; CSKY-NEXT: mvc32 a2
; CSKY-NEXT: st16.w a2, (sp, 20)
; CSKY-NEXT: ld16.w a2, (sp, 16)
; CSKY-NEXT: btsti16 a2, 0
; CSKY-NEXT: mvc32 a2
; CSKY-NEXT: ld16.w a3, (sp, 12)
; CSKY-NEXT: btsti16 a3, 0
; CSKY-NEXT: mvc32 a3
; CSKY-NEXT: ld32.w t0, (sp, 20)
; CSKY-NEXT: btsti32 t0, 0
; CSKY-NEXT: movf32 a3, a2
; CSKY-NEXT: btsti16 a3, 0
; CSKY-NEXT: mov16 a2, l1
; CSKY-NEXT: movt32 a2, a0
; CSKY-NEXT: mov16 a3, l0
; CSKY-NEXT: movt32 a3, a1
; CSKY-NEXT: st16.w a0, (sp, 24)
; CSKY-NEXT: st16.w a1, (sp, 28)
; CSKY-NEXT: st16.w l3, (sp, 4)
; CSKY-NEXT: st16.w l3, (sp, 0)
; CSKY-NEXT: mov16 a0, l2
; CSKY-NEXT: addi16 a1, sp, 24
; CSKY-NEXT: jsri32 [.LCPI216_0]
; CSKY-NEXT: mov16 a2, a0
; CSKY-NEXT: ld16.w a1, (sp, 28)
; CSKY-NEXT: ld16.w a0, (sp, 24)
; CSKY-NEXT: bez32 a2, .LBB216_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 32
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 20
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI216_0:
; CSKY-NEXT: .long __atomic_compare_exchange_8
  %1 = atomicrmw umin i64* %a, i64 %b acquire
  ret i64 %1
}
; atomicrmw umin i64, release ordering. Unsigned-min cmpxchg loop; distinct
; success/failure orderings: l4 = 3 (release, success) at (sp, 0) and
; l3 = 0 (relaxed, failure) at (sp, 4).
; NOTE: CHECK lines are autogenerated by update_llc_test_checks.py — do not
; edit them by hand; regenerate instead.
define i64 @atomicrmw_umin_i64_release(i64 *%a, i64 %b) nounwind {
; CSKY-LABEL: atomicrmw_umin_i64_release:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 24
; CSKY-NEXT: st16.w l3, (sp, 20) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w l4, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 32
; CSKY-NEXT: mov16 l0, a2
; CSKY-NEXT: mov16 l1, a1
; CSKY-NEXT: mov16 l2, a0
; CSKY-NEXT: ld16.w a1, (a0, 4)
; CSKY-NEXT: ld16.w a0, (a0, 0)
; CSKY-NEXT: movi16 l3, 0
; CSKY-NEXT: movi32 l4, 3
; CSKY-NEXT: .LBB217_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: cmphs16 l1, a0
; CSKY-NEXT: mvc32 a2
; CSKY-NEXT: st16.w a2, (sp, 16)
; CSKY-NEXT: cmphs16 l0, a1
; CSKY-NEXT: mvc32 a2
; CSKY-NEXT: st16.w a2, (sp, 12)
; CSKY-NEXT: cmpne16 a1, l0
; CSKY-NEXT: mvc32 a2
; CSKY-NEXT: st16.w a2, (sp, 20)
; CSKY-NEXT: ld16.w a2, (sp, 16)
; CSKY-NEXT: btsti16 a2, 0
; CSKY-NEXT: mvc32 a2
; CSKY-NEXT: ld16.w a3, (sp, 12)
; CSKY-NEXT: btsti16 a3, 0
; CSKY-NEXT: mvc32 a3
; CSKY-NEXT: ld32.w t0, (sp, 20)
; CSKY-NEXT: btsti32 t0, 0
; CSKY-NEXT: movf32 a3, a2
; CSKY-NEXT: btsti16 a3, 0
; CSKY-NEXT: mov16 a2, l1
; CSKY-NEXT: movt32 a2, a0
; CSKY-NEXT: mov16 a3, l0
; CSKY-NEXT: movt32 a3, a1
; CSKY-NEXT: st16.w a0, (sp, 24)
; CSKY-NEXT: st16.w a1, (sp, 28)
; CSKY-NEXT: st16.w l3, (sp, 4)
; CSKY-NEXT: st32.w l4, (sp, 0)
; CSKY-NEXT: mov16 a0, l2
; CSKY-NEXT: addi16 a1, sp, 24
; CSKY-NEXT: jsri32 [.LCPI217_0]
; CSKY-NEXT: mov16 a2, a0
; CSKY-NEXT: ld16.w a1, (sp, 28)
; CSKY-NEXT: ld16.w a0, (sp, 24)
; CSKY-NEXT: bez32 a2, .LBB217_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 32
; CSKY-NEXT: ld32.w l4, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld32.w lr, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 20) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 24
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI217_0:
; CSKY-NEXT: .long __atomic_compare_exchange_8
  %1 = atomicrmw umin i64* %a, i64 %b release
  ret i64 %1
}
; atomicrmw umin i64, acq_rel ordering. Unsigned-min cmpxchg loop with
; l4 = 4 (acq_rel, success) at (sp, 0) and l3 = 2 (acquire, failure) at (sp, 4)
; passed to __atomic_compare_exchange_8 on the stack.
; NOTE: CHECK lines are autogenerated by update_llc_test_checks.py — do not
; edit them by hand; regenerate instead.
define i64 @atomicrmw_umin_i64_acq_rel(i64 *%a, i64 %b) nounwind {
; CSKY-LABEL: atomicrmw_umin_i64_acq_rel:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 24
; CSKY-NEXT: st16.w l3, (sp, 20) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w l4, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 32
; CSKY-NEXT: mov16 l0, a2
; CSKY-NEXT: mov16 l1, a1
; CSKY-NEXT: mov16 l2, a0
; CSKY-NEXT: ld16.w a1, (a0, 4)
; CSKY-NEXT: ld16.w a0, (a0, 0)
; CSKY-NEXT: movi16 l3, 2
; CSKY-NEXT: movi32 l4, 4
; CSKY-NEXT: .LBB218_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: cmphs16 l1, a0
; CSKY-NEXT: mvc32 a2
; CSKY-NEXT: st16.w a2, (sp, 16)
; CSKY-NEXT: cmphs16 l0, a1
; CSKY-NEXT: mvc32 a2
; CSKY-NEXT: st16.w a2, (sp, 12)
; CSKY-NEXT: cmpne16 a1, l0
; CSKY-NEXT: mvc32 a2
; CSKY-NEXT: st16.w a2, (sp, 20)
; CSKY-NEXT: ld16.w a2, (sp, 16)
; CSKY-NEXT: btsti16 a2, 0
; CSKY-NEXT: mvc32 a2
; CSKY-NEXT: ld16.w a3, (sp, 12)
; CSKY-NEXT: btsti16 a3, 0
; CSKY-NEXT: mvc32 a3
; CSKY-NEXT: ld32.w t0, (sp, 20)
; CSKY-NEXT: btsti32 t0, 0
; CSKY-NEXT: movf32 a3, a2
; CSKY-NEXT: btsti16 a3, 0
; CSKY-NEXT: mov16 a2, l1
; CSKY-NEXT: movt32 a2, a0
; CSKY-NEXT: mov16 a3, l0
; CSKY-NEXT: movt32 a3, a1
; CSKY-NEXT: st16.w a0, (sp, 24)
; CSKY-NEXT: st16.w a1, (sp, 28)
; CSKY-NEXT: st16.w l3, (sp, 4)
; CSKY-NEXT: st32.w l4, (sp, 0)
; CSKY-NEXT: mov16 a0, l2
; CSKY-NEXT: addi16 a1, sp, 24
; CSKY-NEXT: jsri32 [.LCPI218_0]
; CSKY-NEXT: mov16 a2, a0
; CSKY-NEXT: ld16.w a1, (sp, 28)
; CSKY-NEXT: ld16.w a0, (sp, 24)
; CSKY-NEXT: bez32 a2, .LBB218_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 32
; CSKY-NEXT: ld32.w l4, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld32.w lr, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 20) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 24
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI218_0:
; CSKY-NEXT: .long __atomic_compare_exchange_8
  %1 = atomicrmw umin i64* %a, i64 %b acq_rel
  ret i64 %1
}
; atomicrmw umin i64, seq_cst ordering. Unsigned-min cmpxchg loop; l3 = 5
; (seq_cst) is stored to both stack slots, so one order register covers both
; the success and failure orderings.
; NOTE: CHECK lines are autogenerated by update_llc_test_checks.py — do not
; edit them by hand; regenerate instead.
define i64 @atomicrmw_umin_i64_seq_cst(i64 *%a, i64 %b) nounwind {
; CSKY-LABEL: atomicrmw_umin_i64_seq_cst:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 20
; CSKY-NEXT: st16.w l3, (sp, 16) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l2, (sp, 12) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l1, (sp, 8) # 4-byte Folded Spill
; CSKY-NEXT: st16.w l0, (sp, 4) # 4-byte Folded Spill
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 32
; CSKY-NEXT: mov16 l0, a2
; CSKY-NEXT: mov16 l1, a1
; CSKY-NEXT: mov16 l2, a0
; CSKY-NEXT: ld16.w a1, (a0, 4)
; CSKY-NEXT: ld16.w a0, (a0, 0)
; CSKY-NEXT: movi16 l3, 5
; CSKY-NEXT: .LBB219_1: # %atomicrmw.start
; CSKY-NEXT: # =>This Inner Loop Header: Depth=1
; CSKY-NEXT: cmphs16 l1, a0
; CSKY-NEXT: mvc32 a2
; CSKY-NEXT: st16.w a2, (sp, 16)
; CSKY-NEXT: cmphs16 l0, a1
; CSKY-NEXT: mvc32 a2
; CSKY-NEXT: st16.w a2, (sp, 12)
; CSKY-NEXT: cmpne16 a1, l0
; CSKY-NEXT: mvc32 a2
; CSKY-NEXT: st16.w a2, (sp, 20)
; CSKY-NEXT: ld16.w a2, (sp, 16)
; CSKY-NEXT: btsti16 a2, 0
; CSKY-NEXT: mvc32 a2
; CSKY-NEXT: ld16.w a3, (sp, 12)
; CSKY-NEXT: btsti16 a3, 0
; CSKY-NEXT: mvc32 a3
; CSKY-NEXT: ld32.w t0, (sp, 20)
; CSKY-NEXT: btsti32 t0, 0
; CSKY-NEXT: movf32 a3, a2
; CSKY-NEXT: btsti16 a3, 0
; CSKY-NEXT: mov16 a2, l1
; CSKY-NEXT: movt32 a2, a0
; CSKY-NEXT: mov16 a3, l0
; CSKY-NEXT: movt32 a3, a1
; CSKY-NEXT: st16.w a0, (sp, 24)
; CSKY-NEXT: st16.w a1, (sp, 28)
; CSKY-NEXT: st16.w l3, (sp, 4)
; CSKY-NEXT: st16.w l3, (sp, 0)
; CSKY-NEXT: mov16 a0, l2
; CSKY-NEXT: addi16 a1, sp, 24
; CSKY-NEXT: jsri32 [.LCPI219_0]
; CSKY-NEXT: mov16 a2, a0
; CSKY-NEXT: ld16.w a1, (sp, 28)
; CSKY-NEXT: ld16.w a0, (sp, 24)
; CSKY-NEXT: bez32 a2, .LBB219_1
; CSKY-NEXT: # %bb.2: # %atomicrmw.end
; CSKY-NEXT: addi16 sp, sp, 32
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l0, (sp, 4) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l1, (sp, 8) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l2, (sp, 12) # 4-byte Folded Reload
; CSKY-NEXT: ld16.w l3, (sp, 16) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 20
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.3:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI219_0:
; CSKY-NEXT: .long __atomic_compare_exchange_8
  %1 = atomicrmw umin i64* %a, i64 %b seq_cst
  ret i64 %1
}