| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve,+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK-MVE |
| ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK-MVEFP |
| |
| ; fcmp oeq (ordered equal): the simplest predicate. CHECK-MVE does four |
| ; scalar vcmp.f32/vmrs pairs, materialises the 'eq' flag into r0-r3 with |
| ; cset, then each 'cmp rN, #0' + vseleq.f32 picks the %a lane (s8-s11, q2) |
| ; when the compare held, else the %b lane (s12-s15, q3). CHECK-MVEFP needs |
| ; only one vector compare plus a predicated select. |
| define arm_aapcs_vfpcc <4 x float> @vcmp_oeq_v4f32(<4 x float> %src, <4 x float> %src2, <4 x float> %a, <4 x float> %b) { |
| ; CHECK-MVE-LABEL: vcmp_oeq_v4f32: |
| ; CHECK-MVE: @ %bb.0: @ %entry |
| ; CHECK-MVE-NEXT: vcmp.f32 s1, s5 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f32 s0, s4 |
| ; CHECK-MVE-NEXT: cset r0, eq |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f32 s3, s7 |
| ; CHECK-MVE-NEXT: cset r1, eq |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f32 s2, s6 |
| ; CHECK-MVE-NEXT: cset r2, eq |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: cset r3, eq |
| ; CHECK-MVE-NEXT: cmp r2, #0 |
| ; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11 |
| ; CHECK-MVE-NEXT: cmp r3, #0 |
| ; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10 |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9 |
| ; CHECK-MVE-NEXT: cmp r1, #0 |
| ; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8 |
| ; CHECK-MVE-NEXT: bx lr |
| ; |
| ; CHECK-MVEFP-LABEL: vcmp_oeq_v4f32: |
| ; CHECK-MVEFP: @ %bb.0: @ %entry |
| ; CHECK-MVEFP-NEXT: vcmp.f32 eq, q0, q1 |
| ; CHECK-MVEFP-NEXT: vpsel q0, q2, q3 |
| ; CHECK-MVEFP-NEXT: bx lr |
| entry: |
| %c = fcmp oeq <4 x float> %src, %src2 |
| %s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b |
| ret <4 x float> %s |
| } |
| |
| ; fcmp one = olt || ogt (ordered, non-equal): two flag tests per lane. |
| ; 'cset rN, mi' then 'csinc rN, rN, zr, le' gives rN = mi ? 1 : (le ? rN : 1), |
| ; i.e. set when mi (olt) or when !le (ogt). Note llc re-issues vcmp s1, s5 |
| ; (lines 1 and 3) because that lane's flags are consumed twice with other |
| ; compares scheduled in between. MVEFP ANDs two compares in one VPT block |
| ; and selects with q3/q2 swapped relative to the oeq case to invert the mask. |
| define arm_aapcs_vfpcc <4 x float> @vcmp_one_v4f32(<4 x float> %src, <4 x float> %src2, <4 x float> %a, <4 x float> %b) { |
| ; CHECK-MVE-LABEL: vcmp_one_v4f32: |
| ; CHECK-MVE: @ %bb.0: @ %entry |
| ; CHECK-MVE-NEXT: vcmp.f32 s1, s5 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f32 s1, s5 |
| ; CHECK-MVE-NEXT: cset r0, mi |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f32 s0, s4 |
| ; CHECK-MVE-NEXT: csinc r0, r0, zr, le |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f32 s3, s7 |
| ; CHECK-MVE-NEXT: cset r1, mi |
| ; CHECK-MVE-NEXT: csinc r1, r1, zr, le |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f32 s2, s6 |
| ; CHECK-MVE-NEXT: cset r2, mi |
| ; CHECK-MVE-NEXT: csinc r2, r2, zr, le |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: cset r3, mi |
| ; CHECK-MVE-NEXT: csinc r3, r3, zr, le |
| ; CHECK-MVE-NEXT: cmp r2, #0 |
| ; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11 |
| ; CHECK-MVE-NEXT: cmp r3, #0 |
| ; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10 |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9 |
| ; CHECK-MVE-NEXT: cmp r1, #0 |
| ; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8 |
| ; CHECK-MVE-NEXT: bx lr |
| ; |
| ; CHECK-MVEFP-LABEL: vcmp_one_v4f32: |
| ; CHECK-MVEFP: @ %bb.0: @ %entry |
| ; CHECK-MVEFP-NEXT: vpt.f32 le, q1, q0 |
| ; CHECK-MVEFP-NEXT: vcmpt.f32 le, q0, q1 |
| ; CHECK-MVEFP-NEXT: vpsel q0, q3, q2 |
| ; CHECK-MVEFP-NEXT: bx lr |
| entry: |
| %c = fcmp one <4 x float> %src, %src2 |
| %s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b |
| ret <4 x float> %s |
| } |
| |
| ; fcmp ogt maps directly in both paths: scalar 'gt' condition per lane, |
| ; single vector 'vcmp.f32 gt, q0, q1' for MVEFP. |
| define arm_aapcs_vfpcc <4 x float> @vcmp_ogt_v4f32(<4 x float> %src, <4 x float> %src2, <4 x float> %a, <4 x float> %b) { |
| ; CHECK-MVE-LABEL: vcmp_ogt_v4f32: |
| ; CHECK-MVE: @ %bb.0: @ %entry |
| ; CHECK-MVE-NEXT: vcmp.f32 s1, s5 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f32 s0, s4 |
| ; CHECK-MVE-NEXT: cset r0, gt |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f32 s3, s7 |
| ; CHECK-MVE-NEXT: cset r1, gt |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f32 s2, s6 |
| ; CHECK-MVE-NEXT: cset r2, gt |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: cset r3, gt |
| ; CHECK-MVE-NEXT: cmp r2, #0 |
| ; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11 |
| ; CHECK-MVE-NEXT: cmp r3, #0 |
| ; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10 |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9 |
| ; CHECK-MVE-NEXT: cmp r1, #0 |
| ; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8 |
| ; CHECK-MVE-NEXT: bx lr |
| ; |
| ; CHECK-MVEFP-LABEL: vcmp_ogt_v4f32: |
| ; CHECK-MVEFP: @ %bb.0: @ %entry |
| ; CHECK-MVEFP-NEXT: vcmp.f32 gt, q0, q1 |
| ; CHECK-MVEFP-NEXT: vpsel q0, q2, q3 |
| ; CHECK-MVEFP-NEXT: bx lr |
| entry: |
| %c = fcmp ogt <4 x float> %src, %src2 |
| %s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b |
| ret <4 x float> %s |
| } |
| |
| ; fcmp oge maps directly: scalar 'ge' condition per lane, single vector |
| ; 'vcmp.f32 ge, q0, q1' for MVEFP. |
| define arm_aapcs_vfpcc <4 x float> @vcmp_oge_v4f32(<4 x float> %src, <4 x float> %src2, <4 x float> %a, <4 x float> %b) { |
| ; CHECK-MVE-LABEL: vcmp_oge_v4f32: |
| ; CHECK-MVE: @ %bb.0: @ %entry |
| ; CHECK-MVE-NEXT: vcmp.f32 s1, s5 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f32 s0, s4 |
| ; CHECK-MVE-NEXT: cset r0, ge |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f32 s3, s7 |
| ; CHECK-MVE-NEXT: cset r1, ge |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f32 s2, s6 |
| ; CHECK-MVE-NEXT: cset r2, ge |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: cset r3, ge |
| ; CHECK-MVE-NEXT: cmp r2, #0 |
| ; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11 |
| ; CHECK-MVE-NEXT: cmp r3, #0 |
| ; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10 |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9 |
| ; CHECK-MVE-NEXT: cmp r1, #0 |
| ; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8 |
| ; CHECK-MVE-NEXT: bx lr |
| ; |
| ; CHECK-MVEFP-LABEL: vcmp_oge_v4f32: |
| ; CHECK-MVEFP: @ %bb.0: @ %entry |
| ; CHECK-MVEFP-NEXT: vcmp.f32 ge, q0, q1 |
| ; CHECK-MVEFP-NEXT: vpsel q0, q2, q3 |
| ; CHECK-MVEFP-NEXT: bx lr |
| entry: |
| %c = fcmp oge <4 x float> %src, %src2 |
| %s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b |
| ret <4 x float> %s |
| } |
| |
| ; fcmp olt: the scalar path tests 'mi' (N set after vcmp/vmrs = ordered |
| ; less-than); the vector path swaps the operands and uses 'gt, q1, q0'. |
| define arm_aapcs_vfpcc <4 x float> @vcmp_olt_v4f32(<4 x float> %src, <4 x float> %src2, <4 x float> %a, <4 x float> %b) { |
| ; CHECK-MVE-LABEL: vcmp_olt_v4f32: |
| ; CHECK-MVE: @ %bb.0: @ %entry |
| ; CHECK-MVE-NEXT: vcmp.f32 s1, s5 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f32 s0, s4 |
| ; CHECK-MVE-NEXT: cset r0, mi |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f32 s3, s7 |
| ; CHECK-MVE-NEXT: cset r1, mi |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f32 s2, s6 |
| ; CHECK-MVE-NEXT: cset r2, mi |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: cset r3, mi |
| ; CHECK-MVE-NEXT: cmp r2, #0 |
| ; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11 |
| ; CHECK-MVE-NEXT: cmp r3, #0 |
| ; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10 |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9 |
| ; CHECK-MVE-NEXT: cmp r1, #0 |
| ; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8 |
| ; CHECK-MVE-NEXT: bx lr |
| ; |
| ; CHECK-MVEFP-LABEL: vcmp_olt_v4f32: |
| ; CHECK-MVEFP: @ %bb.0: @ %entry |
| ; CHECK-MVEFP-NEXT: vcmp.f32 gt, q1, q0 |
| ; CHECK-MVEFP-NEXT: vpsel q0, q2, q3 |
| ; CHECK-MVEFP-NEXT: bx lr |
| entry: |
| %c = fcmp olt <4 x float> %src, %src2 |
| %s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b |
| ret <4 x float> %s |
| } |
| |
| ; fcmp ole: scalar path tests 'ls' (C clear or Z set after vmrs = ordered |
| ; less-or-equal); vector path swaps operands and uses 'ge, q1, q0'. |
| define arm_aapcs_vfpcc <4 x float> @vcmp_ole_v4f32(<4 x float> %src, <4 x float> %src2, <4 x float> %a, <4 x float> %b) { |
| ; CHECK-MVE-LABEL: vcmp_ole_v4f32: |
| ; CHECK-MVE: @ %bb.0: @ %entry |
| ; CHECK-MVE-NEXT: vcmp.f32 s1, s5 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f32 s0, s4 |
| ; CHECK-MVE-NEXT: cset r0, ls |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f32 s3, s7 |
| ; CHECK-MVE-NEXT: cset r1, ls |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f32 s2, s6 |
| ; CHECK-MVE-NEXT: cset r2, ls |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: cset r3, ls |
| ; CHECK-MVE-NEXT: cmp r2, #0 |
| ; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11 |
| ; CHECK-MVE-NEXT: cmp r3, #0 |
| ; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10 |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9 |
| ; CHECK-MVE-NEXT: cmp r1, #0 |
| ; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8 |
| ; CHECK-MVE-NEXT: bx lr |
| ; |
| ; CHECK-MVEFP-LABEL: vcmp_ole_v4f32: |
| ; CHECK-MVEFP: @ %bb.0: @ %entry |
| ; CHECK-MVEFP-NEXT: vcmp.f32 ge, q1, q0 |
| ; CHECK-MVEFP-NEXT: vpsel q0, q2, q3 |
| ; CHECK-MVEFP-NEXT: bx lr |
| entry: |
| %c = fcmp ole <4 x float> %src, %src2 |
| %s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b |
| ret <4 x float> %s |
| } |
| |
| ; fcmp ueq = oeq || unordered. Scalar: 'cset rN, eq' then |
| ; 'csinc rN, rN, zr, vc' also sets rN when V is set (unordered result); |
| ; vcmp s1, s5 is issued twice because that lane's flags are read twice. |
| ; MVEFP builds the same two-compare VPT mask as the 'one' lowering and |
| ; selects %a on it (vpsel q2, q3 — the uninverted order). |
| define arm_aapcs_vfpcc <4 x float> @vcmp_ueq_v4f32(<4 x float> %src, <4 x float> %src2, <4 x float> %a, <4 x float> %b) { |
| ; CHECK-MVE-LABEL: vcmp_ueq_v4f32: |
| ; CHECK-MVE: @ %bb.0: @ %entry |
| ; CHECK-MVE-NEXT: vcmp.f32 s1, s5 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f32 s1, s5 |
| ; CHECK-MVE-NEXT: cset r0, eq |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f32 s0, s4 |
| ; CHECK-MVE-NEXT: csinc r0, r0, zr, vc |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f32 s3, s7 |
| ; CHECK-MVE-NEXT: cset r1, eq |
| ; CHECK-MVE-NEXT: csinc r1, r1, zr, vc |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f32 s2, s6 |
| ; CHECK-MVE-NEXT: cset r2, eq |
| ; CHECK-MVE-NEXT: csinc r2, r2, zr, vc |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: cset r3, eq |
| ; CHECK-MVE-NEXT: csinc r3, r3, zr, vc |
| ; CHECK-MVE-NEXT: cmp r2, #0 |
| ; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11 |
| ; CHECK-MVE-NEXT: cmp r3, #0 |
| ; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10 |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9 |
| ; CHECK-MVE-NEXT: cmp r1, #0 |
| ; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8 |
| ; CHECK-MVE-NEXT: bx lr |
| ; |
| ; CHECK-MVEFP-LABEL: vcmp_ueq_v4f32: |
| ; CHECK-MVEFP: @ %bb.0: @ %entry |
| ; CHECK-MVEFP-NEXT: vpt.f32 le, q1, q0 |
| ; CHECK-MVEFP-NEXT: vcmpt.f32 le, q0, q1 |
| ; CHECK-MVEFP-NEXT: vpsel q0, q2, q3 |
| ; CHECK-MVEFP-NEXT: bx lr |
| entry: |
| %c = fcmp ueq <4 x float> %src, %src2 |
| %s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b |
| ret <4 x float> %s |
| } |
| |
| ; fcmp une is the cheapest scalar predicate here: vseleq keys directly off |
| ; the FP 'eq' flag with the select operands swapped (eq picks the %b lane |
| ; s12-s15, !eq picks %a), so no cset/cmp round-trip through a GPR is |
| ; needed. MVEFP: a single 'vcmp.f32 ne' + vpsel. |
| define arm_aapcs_vfpcc <4 x float> @vcmp_une_v4f32(<4 x float> %src, <4 x float> %src2, <4 x float> %a, <4 x float> %b) { |
| ; CHECK-MVE-LABEL: vcmp_une_v4f32: |
| ; CHECK-MVE: @ %bb.0: @ %entry |
| ; CHECK-MVE-NEXT: vcmp.f32 s3, s7 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f32 s2, s6 |
| ; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f32 s1, s5 |
| ; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f32 s0, s4 |
| ; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8 |
| ; CHECK-MVE-NEXT: bx lr |
| ; |
| ; CHECK-MVEFP-LABEL: vcmp_une_v4f32: |
| ; CHECK-MVEFP: @ %bb.0: @ %entry |
| ; CHECK-MVEFP-NEXT: vcmp.f32 ne, q0, q1 |
| ; CHECK-MVEFP-NEXT: vpsel q0, q2, q3 |
| ; CHECK-MVEFP-NEXT: bx lr |
| entry: |
| %c = fcmp une <4 x float> %src, %src2 |
| %s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b |
| ret <4 x float> %s |
| } |
| |
| ; fcmp ugt (unordered or greater): scalar path tests 'hi'; vector path |
| ; swaps operands and uses 'lt, q1, q0'. |
| define arm_aapcs_vfpcc <4 x float> @vcmp_ugt_v4f32(<4 x float> %src, <4 x float> %src2, <4 x float> %a, <4 x float> %b) { |
| ; CHECK-MVE-LABEL: vcmp_ugt_v4f32: |
| ; CHECK-MVE: @ %bb.0: @ %entry |
| ; CHECK-MVE-NEXT: vcmp.f32 s1, s5 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f32 s0, s4 |
| ; CHECK-MVE-NEXT: cset r0, hi |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f32 s3, s7 |
| ; CHECK-MVE-NEXT: cset r1, hi |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f32 s2, s6 |
| ; CHECK-MVE-NEXT: cset r2, hi |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: cset r3, hi |
| ; CHECK-MVE-NEXT: cmp r2, #0 |
| ; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11 |
| ; CHECK-MVE-NEXT: cmp r3, #0 |
| ; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10 |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9 |
| ; CHECK-MVE-NEXT: cmp r1, #0 |
| ; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8 |
| ; CHECK-MVE-NEXT: bx lr |
| ; |
| ; CHECK-MVEFP-LABEL: vcmp_ugt_v4f32: |
| ; CHECK-MVEFP: @ %bb.0: @ %entry |
| ; CHECK-MVEFP-NEXT: vcmp.f32 lt, q1, q0 |
| ; CHECK-MVEFP-NEXT: vpsel q0, q2, q3 |
| ; CHECK-MVEFP-NEXT: bx lr |
| entry: |
| %c = fcmp ugt <4 x float> %src, %src2 |
| %s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b |
| ret <4 x float> %s |
| } |
| |
| ; fcmp uge (unordered or greater-equal): scalar path tests 'pl'; vector |
| ; path swaps operands and uses 'le, q1, q0'. |
| define arm_aapcs_vfpcc <4 x float> @vcmp_uge_v4f32(<4 x float> %src, <4 x float> %src2, <4 x float> %a, <4 x float> %b) { |
| ; CHECK-MVE-LABEL: vcmp_uge_v4f32: |
| ; CHECK-MVE: @ %bb.0: @ %entry |
| ; CHECK-MVE-NEXT: vcmp.f32 s1, s5 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f32 s0, s4 |
| ; CHECK-MVE-NEXT: cset r0, pl |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f32 s3, s7 |
| ; CHECK-MVE-NEXT: cset r1, pl |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f32 s2, s6 |
| ; CHECK-MVE-NEXT: cset r2, pl |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: cset r3, pl |
| ; CHECK-MVE-NEXT: cmp r2, #0 |
| ; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11 |
| ; CHECK-MVE-NEXT: cmp r3, #0 |
| ; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10 |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9 |
| ; CHECK-MVE-NEXT: cmp r1, #0 |
| ; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8 |
| ; CHECK-MVE-NEXT: bx lr |
| ; |
| ; CHECK-MVEFP-LABEL: vcmp_uge_v4f32: |
| ; CHECK-MVEFP: @ %bb.0: @ %entry |
| ; CHECK-MVEFP-NEXT: vcmp.f32 le, q1, q0 |
| ; CHECK-MVEFP-NEXT: vpsel q0, q2, q3 |
| ; CHECK-MVEFP-NEXT: bx lr |
| entry: |
| %c = fcmp uge <4 x float> %src, %src2 |
| %s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b |
| ret <4 x float> %s |
| } |
| |
| ; fcmp ult (unordered or less): scalar path tests 'lt'; vector path uses |
| ; 'lt, q0, q1' directly, with operands in natural order. |
| define arm_aapcs_vfpcc <4 x float> @vcmp_ult_v4f32(<4 x float> %src, <4 x float> %src2, <4 x float> %a, <4 x float> %b) { |
| ; CHECK-MVE-LABEL: vcmp_ult_v4f32: |
| ; CHECK-MVE: @ %bb.0: @ %entry |
| ; CHECK-MVE-NEXT: vcmp.f32 s1, s5 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f32 s0, s4 |
| ; CHECK-MVE-NEXT: cset r0, lt |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f32 s3, s7 |
| ; CHECK-MVE-NEXT: cset r1, lt |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f32 s2, s6 |
| ; CHECK-MVE-NEXT: cset r2, lt |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: cset r3, lt |
| ; CHECK-MVE-NEXT: cmp r2, #0 |
| ; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11 |
| ; CHECK-MVE-NEXT: cmp r3, #0 |
| ; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10 |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9 |
| ; CHECK-MVE-NEXT: cmp r1, #0 |
| ; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8 |
| ; CHECK-MVE-NEXT: bx lr |
| ; |
| ; CHECK-MVEFP-LABEL: vcmp_ult_v4f32: |
| ; CHECK-MVEFP: @ %bb.0: @ %entry |
| ; CHECK-MVEFP-NEXT: vcmp.f32 lt, q0, q1 |
| ; CHECK-MVEFP-NEXT: vpsel q0, q2, q3 |
| ; CHECK-MVEFP-NEXT: bx lr |
| entry: |
| %c = fcmp ult <4 x float> %src, %src2 |
| %s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b |
| ret <4 x float> %s |
| } |
| |
| ; fcmp ule (unordered or less-equal): scalar path tests 'le'; vector path |
| ; uses 'le, q0, q1' directly, with operands in natural order. |
| define arm_aapcs_vfpcc <4 x float> @vcmp_ule_v4f32(<4 x float> %src, <4 x float> %src2, <4 x float> %a, <4 x float> %b) { |
| ; CHECK-MVE-LABEL: vcmp_ule_v4f32: |
| ; CHECK-MVE: @ %bb.0: @ %entry |
| ; CHECK-MVE-NEXT: vcmp.f32 s1, s5 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f32 s0, s4 |
| ; CHECK-MVE-NEXT: cset r0, le |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f32 s3, s7 |
| ; CHECK-MVE-NEXT: cset r1, le |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f32 s2, s6 |
| ; CHECK-MVE-NEXT: cset r2, le |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: cset r3, le |
| ; CHECK-MVE-NEXT: cmp r2, #0 |
| ; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11 |
| ; CHECK-MVE-NEXT: cmp r3, #0 |
| ; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10 |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9 |
| ; CHECK-MVE-NEXT: cmp r1, #0 |
| ; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8 |
| ; CHECK-MVE-NEXT: bx lr |
| ; |
| ; CHECK-MVEFP-LABEL: vcmp_ule_v4f32: |
| ; CHECK-MVEFP: @ %bb.0: @ %entry |
| ; CHECK-MVEFP-NEXT: vcmp.f32 le, q0, q1 |
| ; CHECK-MVEFP-NEXT: vpsel q0, q2, q3 |
| ; CHECK-MVEFP-NEXT: bx lr |
| entry: |
| %c = fcmp ule <4 x float> %src, %src2 |
| %s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b |
| ret <4 x float> %s |
| } |
| |
| ; fcmp ord (neither operand NaN): scalar path tests 'vc' (V clear after |
| ; vmrs = comparison was ordered). MVEFP has no direct ordered test, so a |
| ; VPT pair ('le q1,q0' then 'lt q0,q1') builds the complement mask and the |
| ; select is inverted (vpsel q3, q2). |
| define arm_aapcs_vfpcc <4 x float> @vcmp_ord_v4f32(<4 x float> %src, <4 x float> %src2, <4 x float> %a, <4 x float> %b) { |
| ; CHECK-MVE-LABEL: vcmp_ord_v4f32: |
| ; CHECK-MVE: @ %bb.0: @ %entry |
| ; CHECK-MVE-NEXT: vcmp.f32 s1, s5 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f32 s0, s4 |
| ; CHECK-MVE-NEXT: cset r0, vc |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f32 s3, s7 |
| ; CHECK-MVE-NEXT: cset r1, vc |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f32 s2, s6 |
| ; CHECK-MVE-NEXT: cset r2, vc |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: cset r3, vc |
| ; CHECK-MVE-NEXT: cmp r2, #0 |
| ; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11 |
| ; CHECK-MVE-NEXT: cmp r3, #0 |
| ; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10 |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9 |
| ; CHECK-MVE-NEXT: cmp r1, #0 |
| ; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8 |
| ; CHECK-MVE-NEXT: bx lr |
| ; |
| ; CHECK-MVEFP-LABEL: vcmp_ord_v4f32: |
| ; CHECK-MVEFP: @ %bb.0: @ %entry |
| ; CHECK-MVEFP-NEXT: vpt.f32 le, q1, q0 |
| ; CHECK-MVEFP-NEXT: vcmpt.f32 lt, q0, q1 |
| ; CHECK-MVEFP-NEXT: vpsel q0, q3, q2 |
| ; CHECK-MVEFP-NEXT: bx lr |
| entry: |
| %c = fcmp ord <4 x float> %src, %src2 |
| %s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b |
| ret <4 x float> %s |
| } |
| |
| ; fcmp uno (either operand NaN): scalar path tests 'vs' (V set after vmrs |
| ; = unordered). MVEFP builds the same VPT mask as the 'ord' lowering but |
| ; keeps the select in natural order (vpsel q2, q3). |
| define arm_aapcs_vfpcc <4 x float> @vcmp_uno_v4f32(<4 x float> %src, <4 x float> %src2, <4 x float> %a, <4 x float> %b) { |
| ; CHECK-MVE-LABEL: vcmp_uno_v4f32: |
| ; CHECK-MVE: @ %bb.0: @ %entry |
| ; CHECK-MVE-NEXT: vcmp.f32 s1, s5 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f32 s0, s4 |
| ; CHECK-MVE-NEXT: cset r0, vs |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f32 s3, s7 |
| ; CHECK-MVE-NEXT: cset r1, vs |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f32 s2, s6 |
| ; CHECK-MVE-NEXT: cset r2, vs |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: cset r3, vs |
| ; CHECK-MVE-NEXT: cmp r2, #0 |
| ; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11 |
| ; CHECK-MVE-NEXT: cmp r3, #0 |
| ; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10 |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9 |
| ; CHECK-MVE-NEXT: cmp r1, #0 |
| ; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8 |
| ; CHECK-MVE-NEXT: bx lr |
| ; |
| ; CHECK-MVEFP-LABEL: vcmp_uno_v4f32: |
| ; CHECK-MVEFP: @ %bb.0: @ %entry |
| ; CHECK-MVEFP-NEXT: vpt.f32 le, q1, q0 |
| ; CHECK-MVEFP-NEXT: vcmpt.f32 lt, q0, q1 |
| ; CHECK-MVEFP-NEXT: vpsel q0, q2, q3 |
| ; CHECK-MVEFP-NEXT: bx lr |
| entry: |
| %c = fcmp uno <4 x float> %src, %src2 |
| %s = select <4 x i1> %c, <4 x float> %a, <4 x float> %b |
| ret <4 x float> %s |
| } |
| |
| |
| |
| ; f16 variant of oeq. Without MVE.fp the scalar path must split each s-reg |
| ; pair: vmovx.f16 extracts the odd (top) half-lane, each half gets its own |
| ; vcmp.f16/cset/vseleq.f16, and vins.f16 re-packs the selected top half |
| ; into the bottom result. The extra s16-s18 scratch registers force the |
| ; d8/d9 callee-save spill. MVEFP: a single vcmp.f16 eq + vpsel, as for f32. |
| define arm_aapcs_vfpcc <8 x half> @vcmp_oeq_v8f16(<8 x half> %src, <8 x half> %src2, <8 x half> %a, <8 x half> %b) { |
| ; CHECK-MVE-LABEL: vcmp_oeq_v8f16: |
| ; CHECK-MVE: @ %bb.0: @ %entry |
| ; CHECK-MVE-NEXT: .vsave {d8, d9} |
| ; CHECK-MVE-NEXT: vpush {d8, d9} |
| ; CHECK-MVE-NEXT: vmovx.f16 s16, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s18, s0 |
| ; CHECK-MVE-NEXT: vcmp.f16 s18, s16 |
| ; CHECK-MVE-NEXT: vmovx.f16 s16, s8 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f16 s0, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s18, s12 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s5 |
| ; CHECK-MVE-NEXT: cset r0, eq |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: cset r0, eq |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s0, s12, s8 |
| ; CHECK-MVE-NEXT: vmovx.f16 s8, s1 |
| ; CHECK-MVE-NEXT: vcmp.f16 s8, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s9 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vmovx.f16 s8, s13 |
| ; CHECK-MVE-NEXT: vcmp.f16 s1, s5 |
| ; CHECK-MVE-NEXT: vins.f16 s0, s16 |
| ; CHECK-MVE-NEXT: cset r0, eq |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vmovx.f16 s8, s2 |
| ; CHECK-MVE-NEXT: cset r0, eq |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s1, s13, s9 |
| ; CHECK-MVE-NEXT: vins.f16 s1, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s6 |
| ; CHECK-MVE-NEXT: vcmp.f16 s8, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s10 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f16 s2, s6 |
| ; CHECK-MVE-NEXT: vmovx.f16 s8, s14 |
| ; CHECK-MVE-NEXT: vmovx.f16 s6, s3 |
| ; CHECK-MVE-NEXT: cset r0, eq |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: cset r0, eq |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s2, s14, s10 |
| ; CHECK-MVE-NEXT: vins.f16 s2, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s7 |
| ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s11 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vmovx.f16 s6, s15 |
| ; CHECK-MVE-NEXT: vcmp.f16 s3, s7 |
| ; CHECK-MVE-NEXT: cset r0, eq |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s4, s6, s4 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: cset r0, eq |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s3, s15, s11 |
| ; CHECK-MVE-NEXT: vins.f16 s3, s4 |
| ; CHECK-MVE-NEXT: vpop {d8, d9} |
| ; CHECK-MVE-NEXT: bx lr |
| ; |
| ; CHECK-MVEFP-LABEL: vcmp_oeq_v8f16: |
| ; CHECK-MVEFP: @ %bb.0: @ %entry |
| ; CHECK-MVEFP-NEXT: vcmp.f16 eq, q0, q1 |
| ; CHECK-MVEFP-NEXT: vpsel q0, q2, q3 |
| ; CHECK-MVEFP-NEXT: bx lr |
| entry: |
| %c = fcmp oeq <8 x half> %src, %src2 |
| %s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b |
| ret <8 x half> %s |
| } |
| |
| ; f16 variant of one: same vmovx/vins half-lane splitting as the f16 oeq |
| ; case, but each half-lane needs the two-flag 'cset mi' + 'csinc ..., le' |
| ; sequence (set when olt or ogt). MVEFP: VPT pair of le compares with the |
| ; inverted select (vpsel q3, q2), as in the f32 'one' lowering. |
| define arm_aapcs_vfpcc <8 x half> @vcmp_one_v8f16(<8 x half> %src, <8 x half> %src2, <8 x half> %a, <8 x half> %b) { |
| ; CHECK-MVE-LABEL: vcmp_one_v8f16: |
| ; CHECK-MVE: @ %bb.0: @ %entry |
| ; CHECK-MVE-NEXT: .vsave {d8, d9} |
| ; CHECK-MVE-NEXT: vpush {d8, d9} |
| ; CHECK-MVE-NEXT: vmovx.f16 s16, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s18, s0 |
| ; CHECK-MVE-NEXT: vcmp.f16 s18, s16 |
| ; CHECK-MVE-NEXT: vmovx.f16 s16, s8 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f16 s0, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s18, s12 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s5 |
| ; CHECK-MVE-NEXT: cset r0, mi |
| ; CHECK-MVE-NEXT: csinc r0, r0, zr, le |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: cset r0, mi |
| ; CHECK-MVE-NEXT: csinc r0, r0, zr, le |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s0, s12, s8 |
| ; CHECK-MVE-NEXT: vmovx.f16 s8, s1 |
| ; CHECK-MVE-NEXT: vcmp.f16 s8, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s9 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vmovx.f16 s8, s13 |
| ; CHECK-MVE-NEXT: vcmp.f16 s1, s5 |
| ; CHECK-MVE-NEXT: vins.f16 s0, s16 |
| ; CHECK-MVE-NEXT: cset r0, mi |
| ; CHECK-MVE-NEXT: csinc r0, r0, zr, le |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vmovx.f16 s8, s2 |
| ; CHECK-MVE-NEXT: cset r0, mi |
| ; CHECK-MVE-NEXT: csinc r0, r0, zr, le |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s1, s13, s9 |
| ; CHECK-MVE-NEXT: vins.f16 s1, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s6 |
| ; CHECK-MVE-NEXT: vcmp.f16 s8, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s10 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f16 s2, s6 |
| ; CHECK-MVE-NEXT: vmovx.f16 s8, s14 |
| ; CHECK-MVE-NEXT: vmovx.f16 s6, s3 |
| ; CHECK-MVE-NEXT: cset r0, mi |
| ; CHECK-MVE-NEXT: csinc r0, r0, zr, le |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: cset r0, mi |
| ; CHECK-MVE-NEXT: csinc r0, r0, zr, le |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s2, s14, s10 |
| ; CHECK-MVE-NEXT: vins.f16 s2, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s7 |
| ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s11 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vmovx.f16 s6, s15 |
| ; CHECK-MVE-NEXT: vcmp.f16 s3, s7 |
| ; CHECK-MVE-NEXT: cset r0, mi |
| ; CHECK-MVE-NEXT: csinc r0, r0, zr, le |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s4, s6, s4 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: cset r0, mi |
| ; CHECK-MVE-NEXT: csinc r0, r0, zr, le |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s3, s15, s11 |
| ; CHECK-MVE-NEXT: vins.f16 s3, s4 |
| ; CHECK-MVE-NEXT: vpop {d8, d9} |
| ; CHECK-MVE-NEXT: bx lr |
| ; |
| ; CHECK-MVEFP-LABEL: vcmp_one_v8f16: |
| ; CHECK-MVEFP: @ %bb.0: @ %entry |
| ; CHECK-MVEFP-NEXT: vpt.f16 le, q1, q0 |
| ; CHECK-MVEFP-NEXT: vcmpt.f16 le, q0, q1 |
| ; CHECK-MVEFP-NEXT: vpsel q0, q3, q2 |
| ; CHECK-MVEFP-NEXT: bx lr |
| entry: |
| %c = fcmp one <8 x half> %src, %src2 |
| %s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b |
| ret <8 x half> %s |
| } |
| |
| ; f16 variant of ogt: same vmovx/vins half-lane splitting as the f16 oeq |
| ; case with 'cset r0, gt' per half-lane. MVEFP: single vcmp.f16 gt + vpsel. |
| define arm_aapcs_vfpcc <8 x half> @vcmp_ogt_v8f16(<8 x half> %src, <8 x half> %src2, <8 x half> %a, <8 x half> %b) { |
| ; CHECK-MVE-LABEL: vcmp_ogt_v8f16: |
| ; CHECK-MVE: @ %bb.0: @ %entry |
| ; CHECK-MVE-NEXT: .vsave {d8, d9} |
| ; CHECK-MVE-NEXT: vpush {d8, d9} |
| ; CHECK-MVE-NEXT: vmovx.f16 s16, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s18, s0 |
| ; CHECK-MVE-NEXT: vcmp.f16 s18, s16 |
| ; CHECK-MVE-NEXT: vmovx.f16 s16, s8 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f16 s0, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s18, s12 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s5 |
| ; CHECK-MVE-NEXT: cset r0, gt |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: cset r0, gt |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s0, s12, s8 |
| ; CHECK-MVE-NEXT: vmovx.f16 s8, s1 |
| ; CHECK-MVE-NEXT: vcmp.f16 s8, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s9 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vmovx.f16 s8, s13 |
| ; CHECK-MVE-NEXT: vcmp.f16 s1, s5 |
| ; CHECK-MVE-NEXT: vins.f16 s0, s16 |
| ; CHECK-MVE-NEXT: cset r0, gt |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vmovx.f16 s8, s2 |
| ; CHECK-MVE-NEXT: cset r0, gt |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s1, s13, s9 |
| ; CHECK-MVE-NEXT: vins.f16 s1, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s6 |
| ; CHECK-MVE-NEXT: vcmp.f16 s8, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s10 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f16 s2, s6 |
| ; CHECK-MVE-NEXT: vmovx.f16 s8, s14 |
| ; CHECK-MVE-NEXT: vmovx.f16 s6, s3 |
| ; CHECK-MVE-NEXT: cset r0, gt |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: cset r0, gt |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s2, s14, s10 |
| ; CHECK-MVE-NEXT: vins.f16 s2, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s7 |
| ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s11 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vmovx.f16 s6, s15 |
| ; CHECK-MVE-NEXT: vcmp.f16 s3, s7 |
| ; CHECK-MVE-NEXT: cset r0, gt |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s4, s6, s4 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: cset r0, gt |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s3, s15, s11 |
| ; CHECK-MVE-NEXT: vins.f16 s3, s4 |
| ; CHECK-MVE-NEXT: vpop {d8, d9} |
| ; CHECK-MVE-NEXT: bx lr |
| ; |
| ; CHECK-MVEFP-LABEL: vcmp_ogt_v8f16: |
| ; CHECK-MVEFP: @ %bb.0: @ %entry |
| ; CHECK-MVEFP-NEXT: vcmp.f16 gt, q0, q1 |
| ; CHECK-MVEFP-NEXT: vpsel q0, q2, q3 |
| ; CHECK-MVEFP-NEXT: bx lr |
| entry: |
| %c = fcmp ogt <8 x half> %src, %src2 |
| %s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b |
| ret <8 x half> %s |
| } |
| |
| ; f16 variant of oge: same vmovx/vins half-lane splitting as the f16 oeq |
| ; case with 'cset r0, ge' per half-lane. MVEFP: single vcmp.f16 ge + vpsel. |
| define arm_aapcs_vfpcc <8 x half> @vcmp_oge_v8f16(<8 x half> %src, <8 x half> %src2, <8 x half> %a, <8 x half> %b) { |
| ; CHECK-MVE-LABEL: vcmp_oge_v8f16: |
| ; CHECK-MVE: @ %bb.0: @ %entry |
| ; CHECK-MVE-NEXT: .vsave {d8, d9} |
| ; CHECK-MVE-NEXT: vpush {d8, d9} |
| ; CHECK-MVE-NEXT: vmovx.f16 s16, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s18, s0 |
| ; CHECK-MVE-NEXT: vcmp.f16 s18, s16 |
| ; CHECK-MVE-NEXT: vmovx.f16 s16, s8 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f16 s0, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s18, s12 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s5 |
| ; CHECK-MVE-NEXT: cset r0, ge |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: cset r0, ge |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s0, s12, s8 |
| ; CHECK-MVE-NEXT: vmovx.f16 s8, s1 |
| ; CHECK-MVE-NEXT: vcmp.f16 s8, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s9 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vmovx.f16 s8, s13 |
| ; CHECK-MVE-NEXT: vcmp.f16 s1, s5 |
| ; CHECK-MVE-NEXT: vins.f16 s0, s16 |
| ; CHECK-MVE-NEXT: cset r0, ge |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vmovx.f16 s8, s2 |
| ; CHECK-MVE-NEXT: cset r0, ge |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s1, s13, s9 |
| ; CHECK-MVE-NEXT: vins.f16 s1, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s6 |
| ; CHECK-MVE-NEXT: vcmp.f16 s8, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s10 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f16 s2, s6 |
| ; CHECK-MVE-NEXT: vmovx.f16 s8, s14 |
| ; CHECK-MVE-NEXT: vmovx.f16 s6, s3 |
| ; CHECK-MVE-NEXT: cset r0, ge |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: cset r0, ge |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s2, s14, s10 |
| ; CHECK-MVE-NEXT: vins.f16 s2, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s7 |
| ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s11 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vmovx.f16 s6, s15 |
| ; CHECK-MVE-NEXT: vcmp.f16 s3, s7 |
| ; CHECK-MVE-NEXT: cset r0, ge |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s4, s6, s4 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: cset r0, ge |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s3, s15, s11 |
| ; CHECK-MVE-NEXT: vins.f16 s3, s4 |
| ; CHECK-MVE-NEXT: vpop {d8, d9} |
| ; CHECK-MVE-NEXT: bx lr |
| ; |
| ; CHECK-MVEFP-LABEL: vcmp_oge_v8f16: |
| ; CHECK-MVEFP: @ %bb.0: @ %entry |
| ; CHECK-MVEFP-NEXT: vcmp.f16 ge, q0, q1 |
| ; CHECK-MVEFP-NEXT: vpsel q0, q2, q3 |
| ; CHECK-MVEFP-NEXT: bx lr |
| entry: |
| %c = fcmp oge <8 x half> %src, %src2 |
| %s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b |
| ret <8 x half> %s |
| } |
| |
| define arm_aapcs_vfpcc <8 x half> @vcmp_olt_v8f16(<8 x half> %src, <8 x half> %src2, <8 x half> %a, <8 x half> %b) { |
| ; CHECK-MVE-LABEL: vcmp_olt_v8f16: |
| ; CHECK-MVE: @ %bb.0: @ %entry |
| ; CHECK-MVE-NEXT: .vsave {d8, d9} |
| ; CHECK-MVE-NEXT: vpush {d8, d9} |
| ; CHECK-MVE-NEXT: vmovx.f16 s16, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s18, s0 |
| ; CHECK-MVE-NEXT: vcmp.f16 s18, s16 |
| ; CHECK-MVE-NEXT: vmovx.f16 s16, s8 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f16 s0, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s18, s12 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s5 |
| ; CHECK-MVE-NEXT: cset r0, mi |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: cset r0, mi |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s0, s12, s8 |
| ; CHECK-MVE-NEXT: vmovx.f16 s8, s1 |
| ; CHECK-MVE-NEXT: vcmp.f16 s8, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s9 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vmovx.f16 s8, s13 |
| ; CHECK-MVE-NEXT: vcmp.f16 s1, s5 |
| ; CHECK-MVE-NEXT: vins.f16 s0, s16 |
| ; CHECK-MVE-NEXT: cset r0, mi |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vmovx.f16 s8, s2 |
| ; CHECK-MVE-NEXT: cset r0, mi |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s1, s13, s9 |
| ; CHECK-MVE-NEXT: vins.f16 s1, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s6 |
| ; CHECK-MVE-NEXT: vcmp.f16 s8, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s10 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f16 s2, s6 |
| ; CHECK-MVE-NEXT: vmovx.f16 s8, s14 |
| ; CHECK-MVE-NEXT: vmovx.f16 s6, s3 |
| ; CHECK-MVE-NEXT: cset r0, mi |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: cset r0, mi |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s2, s14, s10 |
| ; CHECK-MVE-NEXT: vins.f16 s2, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s7 |
| ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s11 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vmovx.f16 s6, s15 |
| ; CHECK-MVE-NEXT: vcmp.f16 s3, s7 |
| ; CHECK-MVE-NEXT: cset r0, mi |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s4, s6, s4 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: cset r0, mi |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s3, s15, s11 |
| ; CHECK-MVE-NEXT: vins.f16 s3, s4 |
| ; CHECK-MVE-NEXT: vpop {d8, d9} |
| ; CHECK-MVE-NEXT: bx lr |
| ; |
| ; CHECK-MVEFP-LABEL: vcmp_olt_v8f16: |
| ; CHECK-MVEFP: @ %bb.0: @ %entry |
| ; CHECK-MVEFP-NEXT: vcmp.f16 gt, q1, q0 |
| ; CHECK-MVEFP-NEXT: vpsel q0, q2, q3 |
| ; CHECK-MVEFP-NEXT: bx lr |
| entry: |
| ; fcmp olt lowering: scalar MVE path does a per-f16-lane vcmp + cset on 'mi' feeding vseleq; MVE.fp path folds it into one operand-swapped vector predicate 'vcmp.f16 gt, q1, q0'. |
| %c = fcmp olt <8 x half> %src, %src2 |
| %s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b |
| ret <8 x half> %s |
| } |
| |
| define arm_aapcs_vfpcc <8 x half> @vcmp_ole_v8f16(<8 x half> %src, <8 x half> %src2, <8 x half> %a, <8 x half> %b) { |
| ; CHECK-MVE-LABEL: vcmp_ole_v8f16: |
| ; CHECK-MVE: @ %bb.0: @ %entry |
| ; CHECK-MVE-NEXT: .vsave {d8, d9} |
| ; CHECK-MVE-NEXT: vpush {d8, d9} |
| ; CHECK-MVE-NEXT: vmovx.f16 s16, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s18, s0 |
| ; CHECK-MVE-NEXT: vcmp.f16 s18, s16 |
| ; CHECK-MVE-NEXT: vmovx.f16 s16, s8 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f16 s0, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s18, s12 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s5 |
| ; CHECK-MVE-NEXT: cset r0, ls |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: cset r0, ls |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s0, s12, s8 |
| ; CHECK-MVE-NEXT: vmovx.f16 s8, s1 |
| ; CHECK-MVE-NEXT: vcmp.f16 s8, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s9 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vmovx.f16 s8, s13 |
| ; CHECK-MVE-NEXT: vcmp.f16 s1, s5 |
| ; CHECK-MVE-NEXT: vins.f16 s0, s16 |
| ; CHECK-MVE-NEXT: cset r0, ls |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vmovx.f16 s8, s2 |
| ; CHECK-MVE-NEXT: cset r0, ls |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s1, s13, s9 |
| ; CHECK-MVE-NEXT: vins.f16 s1, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s6 |
| ; CHECK-MVE-NEXT: vcmp.f16 s8, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s10 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f16 s2, s6 |
| ; CHECK-MVE-NEXT: vmovx.f16 s8, s14 |
| ; CHECK-MVE-NEXT: vmovx.f16 s6, s3 |
| ; CHECK-MVE-NEXT: cset r0, ls |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: cset r0, ls |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s2, s14, s10 |
| ; CHECK-MVE-NEXT: vins.f16 s2, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s7 |
| ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s11 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vmovx.f16 s6, s15 |
| ; CHECK-MVE-NEXT: vcmp.f16 s3, s7 |
| ; CHECK-MVE-NEXT: cset r0, ls |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s4, s6, s4 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: cset r0, ls |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s3, s15, s11 |
| ; CHECK-MVE-NEXT: vins.f16 s3, s4 |
| ; CHECK-MVE-NEXT: vpop {d8, d9} |
| ; CHECK-MVE-NEXT: bx lr |
| ; |
| ; CHECK-MVEFP-LABEL: vcmp_ole_v8f16: |
| ; CHECK-MVEFP: @ %bb.0: @ %entry |
| ; CHECK-MVEFP-NEXT: vcmp.f16 ge, q1, q0 |
| ; CHECK-MVEFP-NEXT: vpsel q0, q2, q3 |
| ; CHECK-MVEFP-NEXT: bx lr |
| entry: |
| ; fcmp ole lowering: scalar MVE path uses per-lane cset on 'ls'; MVE.fp path uses the single operand-swapped predicate 'vcmp.f16 ge, q1, q0'. |
| %c = fcmp ole <8 x half> %src, %src2 |
| %s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b |
| ret <8 x half> %s |
| } |
| |
| define arm_aapcs_vfpcc <8 x half> @vcmp_ueq_v8f16(<8 x half> %src, <8 x half> %src2, <8 x half> %a, <8 x half> %b) { |
| ; CHECK-MVE-LABEL: vcmp_ueq_v8f16: |
| ; CHECK-MVE: @ %bb.0: @ %entry |
| ; CHECK-MVE-NEXT: .vsave {d8, d9} |
| ; CHECK-MVE-NEXT: vpush {d8, d9} |
| ; CHECK-MVE-NEXT: vmovx.f16 s16, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s18, s0 |
| ; CHECK-MVE-NEXT: vcmp.f16 s18, s16 |
| ; CHECK-MVE-NEXT: vmovx.f16 s16, s8 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f16 s0, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s18, s12 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s5 |
| ; CHECK-MVE-NEXT: cset r0, eq |
| ; CHECK-MVE-NEXT: csinc r0, r0, zr, vc |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: cset r0, eq |
| ; CHECK-MVE-NEXT: csinc r0, r0, zr, vc |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s0, s12, s8 |
| ; CHECK-MVE-NEXT: vmovx.f16 s8, s1 |
| ; CHECK-MVE-NEXT: vcmp.f16 s8, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s9 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vmovx.f16 s8, s13 |
| ; CHECK-MVE-NEXT: vcmp.f16 s1, s5 |
| ; CHECK-MVE-NEXT: vins.f16 s0, s16 |
| ; CHECK-MVE-NEXT: cset r0, eq |
| ; CHECK-MVE-NEXT: csinc r0, r0, zr, vc |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vmovx.f16 s8, s2 |
| ; CHECK-MVE-NEXT: cset r0, eq |
| ; CHECK-MVE-NEXT: csinc r0, r0, zr, vc |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s1, s13, s9 |
| ; CHECK-MVE-NEXT: vins.f16 s1, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s6 |
| ; CHECK-MVE-NEXT: vcmp.f16 s8, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s10 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f16 s2, s6 |
| ; CHECK-MVE-NEXT: vmovx.f16 s8, s14 |
| ; CHECK-MVE-NEXT: vmovx.f16 s6, s3 |
| ; CHECK-MVE-NEXT: cset r0, eq |
| ; CHECK-MVE-NEXT: csinc r0, r0, zr, vc |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: cset r0, eq |
| ; CHECK-MVE-NEXT: csinc r0, r0, zr, vc |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s2, s14, s10 |
| ; CHECK-MVE-NEXT: vins.f16 s2, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s7 |
| ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s11 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vmovx.f16 s6, s15 |
| ; CHECK-MVE-NEXT: vcmp.f16 s3, s7 |
| ; CHECK-MVE-NEXT: cset r0, eq |
| ; CHECK-MVE-NEXT: csinc r0, r0, zr, vc |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s4, s6, s4 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: cset r0, eq |
| ; CHECK-MVE-NEXT: csinc r0, r0, zr, vc |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s3, s15, s11 |
| ; CHECK-MVE-NEXT: vins.f16 s3, s4 |
| ; CHECK-MVE-NEXT: vpop {d8, d9} |
| ; CHECK-MVE-NEXT: bx lr |
| ; |
| ; CHECK-MVEFP-LABEL: vcmp_ueq_v8f16: |
| ; CHECK-MVEFP: @ %bb.0: @ %entry |
| ; CHECK-MVEFP-NEXT: vpt.f16 le, q1, q0 |
| ; CHECK-MVEFP-NEXT: vcmpt.f16 le, q0, q1 |
| ; CHECK-MVEFP-NEXT: vpsel q0, q2, q3 |
| ; CHECK-MVEFP-NEXT: bx lr |
| entry: |
| ; fcmp ueq (equal-or-unordered) lowering: scalar path combines 'eq' with the unordered flag via 'csinc ..., vc' per lane; MVE.fp path needs a two-compare VPT block since there is no single ueq vector predicate. |
| %c = fcmp ueq <8 x half> %src, %src2 |
| %s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b |
| ret <8 x half> %s |
| } |
| |
| define arm_aapcs_vfpcc <8 x half> @vcmp_une_v8f16(<8 x half> %src, <8 x half> %src2, <8 x half> %a, <8 x half> %b) { |
| ; CHECK-MVE-LABEL: vcmp_une_v8f16: |
| ; CHECK-MVE: @ %bb.0: @ %entry |
| ; CHECK-MVE-NEXT: .vsave {d8, d9, d10, d11} |
| ; CHECK-MVE-NEXT: vpush {d8, d9, d10, d11} |
| ; CHECK-MVE-NEXT: vmovx.f16 s16, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s18, s0 |
| ; CHECK-MVE-NEXT: vcmp.f16 s18, s16 |
| ; CHECK-MVE-NEXT: vmovx.f16 s20, s8 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vmovx.f16 s22, s12 |
| ; CHECK-MVE-NEXT: vcmp.f16 s0, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s5 |
| ; CHECK-MVE-NEXT: vseleq.f16 s16, s22, s20 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vseleq.f16 s0, s12, s8 |
| ; CHECK-MVE-NEXT: vmovx.f16 s8, s1 |
| ; CHECK-MVE-NEXT: vcmp.f16 s8, s4 |
| ; CHECK-MVE-NEXT: vins.f16 s0, s16 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f16 s1, s5 |
| ; CHECK-MVE-NEXT: vmovx.f16 s12, s9 |
| ; CHECK-MVE-NEXT: vmovx.f16 s16, s13 |
| ; CHECK-MVE-NEXT: vmovx.f16 s8, s2 |
| ; CHECK-MVE-NEXT: vmovx.f16 s5, s14 |
| ; CHECK-MVE-NEXT: vseleq.f16 s4, s16, s12 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vmovx.f16 s12, s10 |
| ; CHECK-MVE-NEXT: vseleq.f16 s1, s13, s9 |
| ; CHECK-MVE-NEXT: vins.f16 s1, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s6 |
| ; CHECK-MVE-NEXT: vcmp.f16 s8, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s8, s11 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f16 s2, s6 |
| ; CHECK-MVE-NEXT: vmovx.f16 s6, s3 |
| ; CHECK-MVE-NEXT: vseleq.f16 s4, s5, s12 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vseleq.f16 s2, s14, s10 |
| ; CHECK-MVE-NEXT: vmovx.f16 s10, s15 |
| ; CHECK-MVE-NEXT: vins.f16 s2, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s7 |
| ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f16 s3, s7 |
| ; CHECK-MVE-NEXT: vseleq.f16 s4, s10, s8 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vseleq.f16 s3, s15, s11 |
| ; CHECK-MVE-NEXT: vins.f16 s3, s4 |
| ; CHECK-MVE-NEXT: vpop {d8, d9, d10, d11} |
| ; CHECK-MVE-NEXT: bx lr |
| ; |
| ; CHECK-MVEFP-LABEL: vcmp_une_v8f16: |
| ; CHECK-MVEFP: @ %bb.0: @ %entry |
| ; CHECK-MVEFP-NEXT: vcmp.f16 ne, q0, q1 |
| ; CHECK-MVEFP-NEXT: vpsel q0, q2, q3 |
| ; CHECK-MVEFP-NEXT: bx lr |
| entry: |
| ; fcmp une lowering: no cset/cmp sequences are needed in the scalar path here — vseleq picks %b on 'eq' directly (operands swapped), which is exactly une; MVE.fp path uses the native 'ne' vector predicate. |
| %c = fcmp une <8 x half> %src, %src2 |
| %s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b |
| ret <8 x half> %s |
| } |
| |
| define arm_aapcs_vfpcc <8 x half> @vcmp_ugt_v8f16(<8 x half> %src, <8 x half> %src2, <8 x half> %a, <8 x half> %b) { |
| ; CHECK-MVE-LABEL: vcmp_ugt_v8f16: |
| ; CHECK-MVE: @ %bb.0: @ %entry |
| ; CHECK-MVE-NEXT: .vsave {d8, d9} |
| ; CHECK-MVE-NEXT: vpush {d8, d9} |
| ; CHECK-MVE-NEXT: vmovx.f16 s16, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s18, s0 |
| ; CHECK-MVE-NEXT: vcmp.f16 s18, s16 |
| ; CHECK-MVE-NEXT: vmovx.f16 s16, s8 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f16 s0, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s18, s12 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s5 |
| ; CHECK-MVE-NEXT: cset r0, hi |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: cset r0, hi |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s0, s12, s8 |
| ; CHECK-MVE-NEXT: vmovx.f16 s8, s1 |
| ; CHECK-MVE-NEXT: vcmp.f16 s8, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s9 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vmovx.f16 s8, s13 |
| ; CHECK-MVE-NEXT: vcmp.f16 s1, s5 |
| ; CHECK-MVE-NEXT: vins.f16 s0, s16 |
| ; CHECK-MVE-NEXT: cset r0, hi |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vmovx.f16 s8, s2 |
| ; CHECK-MVE-NEXT: cset r0, hi |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s1, s13, s9 |
| ; CHECK-MVE-NEXT: vins.f16 s1, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s6 |
| ; CHECK-MVE-NEXT: vcmp.f16 s8, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s10 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f16 s2, s6 |
| ; CHECK-MVE-NEXT: vmovx.f16 s8, s14 |
| ; CHECK-MVE-NEXT: vmovx.f16 s6, s3 |
| ; CHECK-MVE-NEXT: cset r0, hi |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: cset r0, hi |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s2, s14, s10 |
| ; CHECK-MVE-NEXT: vins.f16 s2, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s7 |
| ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s11 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vmovx.f16 s6, s15 |
| ; CHECK-MVE-NEXT: vcmp.f16 s3, s7 |
| ; CHECK-MVE-NEXT: cset r0, hi |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s4, s6, s4 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: cset r0, hi |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s3, s15, s11 |
| ; CHECK-MVE-NEXT: vins.f16 s3, s4 |
| ; CHECK-MVE-NEXT: vpop {d8, d9} |
| ; CHECK-MVE-NEXT: bx lr |
| ; |
| ; CHECK-MVEFP-LABEL: vcmp_ugt_v8f16: |
| ; CHECK-MVEFP: @ %bb.0: @ %entry |
| ; CHECK-MVEFP-NEXT: vcmp.f16 lt, q1, q0 |
| ; CHECK-MVEFP-NEXT: vpsel q0, q2, q3 |
| ; CHECK-MVEFP-NEXT: bx lr |
| entry: |
| ; fcmp ugt lowering: scalar MVE path uses per-lane cset on 'hi'; MVE.fp path uses the operand-swapped predicate 'vcmp.f16 lt, q1, q0'. |
| %c = fcmp ugt <8 x half> %src, %src2 |
| %s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b |
| ret <8 x half> %s |
| } |
| |
| define arm_aapcs_vfpcc <8 x half> @vcmp_uge_v8f16(<8 x half> %src, <8 x half> %src2, <8 x half> %a, <8 x half> %b) { |
| ; CHECK-MVE-LABEL: vcmp_uge_v8f16: |
| ; CHECK-MVE: @ %bb.0: @ %entry |
| ; CHECK-MVE-NEXT: .vsave {d8, d9} |
| ; CHECK-MVE-NEXT: vpush {d8, d9} |
| ; CHECK-MVE-NEXT: vmovx.f16 s16, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s18, s0 |
| ; CHECK-MVE-NEXT: vcmp.f16 s18, s16 |
| ; CHECK-MVE-NEXT: vmovx.f16 s16, s8 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f16 s0, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s18, s12 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s5 |
| ; CHECK-MVE-NEXT: cset r0, pl |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: cset r0, pl |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s0, s12, s8 |
| ; CHECK-MVE-NEXT: vmovx.f16 s8, s1 |
| ; CHECK-MVE-NEXT: vcmp.f16 s8, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s9 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vmovx.f16 s8, s13 |
| ; CHECK-MVE-NEXT: vcmp.f16 s1, s5 |
| ; CHECK-MVE-NEXT: vins.f16 s0, s16 |
| ; CHECK-MVE-NEXT: cset r0, pl |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vmovx.f16 s8, s2 |
| ; CHECK-MVE-NEXT: cset r0, pl |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s1, s13, s9 |
| ; CHECK-MVE-NEXT: vins.f16 s1, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s6 |
| ; CHECK-MVE-NEXT: vcmp.f16 s8, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s10 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f16 s2, s6 |
| ; CHECK-MVE-NEXT: vmovx.f16 s8, s14 |
| ; CHECK-MVE-NEXT: vmovx.f16 s6, s3 |
| ; CHECK-MVE-NEXT: cset r0, pl |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: cset r0, pl |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s2, s14, s10 |
| ; CHECK-MVE-NEXT: vins.f16 s2, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s7 |
| ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s11 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vmovx.f16 s6, s15 |
| ; CHECK-MVE-NEXT: vcmp.f16 s3, s7 |
| ; CHECK-MVE-NEXT: cset r0, pl |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s4, s6, s4 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: cset r0, pl |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s3, s15, s11 |
| ; CHECK-MVE-NEXT: vins.f16 s3, s4 |
| ; CHECK-MVE-NEXT: vpop {d8, d9} |
| ; CHECK-MVE-NEXT: bx lr |
| ; |
| ; CHECK-MVEFP-LABEL: vcmp_uge_v8f16: |
| ; CHECK-MVEFP: @ %bb.0: @ %entry |
| ; CHECK-MVEFP-NEXT: vcmp.f16 le, q1, q0 |
| ; CHECK-MVEFP-NEXT: vpsel q0, q2, q3 |
| ; CHECK-MVEFP-NEXT: bx lr |
| entry: |
| ; fcmp uge lowering: scalar MVE path uses per-lane cset on 'pl'; MVE.fp path uses the operand-swapped predicate 'vcmp.f16 le, q1, q0'. |
| %c = fcmp uge <8 x half> %src, %src2 |
| %s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b |
| ret <8 x half> %s |
| } |
| |
| define arm_aapcs_vfpcc <8 x half> @vcmp_ult_v8f16(<8 x half> %src, <8 x half> %src2, <8 x half> %a, <8 x half> %b) { |
| ; CHECK-MVE-LABEL: vcmp_ult_v8f16: |
| ; CHECK-MVE: @ %bb.0: @ %entry |
| ; CHECK-MVE-NEXT: .vsave {d8, d9} |
| ; CHECK-MVE-NEXT: vpush {d8, d9} |
| ; CHECK-MVE-NEXT: vmovx.f16 s16, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s18, s0 |
| ; CHECK-MVE-NEXT: vcmp.f16 s18, s16 |
| ; CHECK-MVE-NEXT: vmovx.f16 s16, s8 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f16 s0, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s18, s12 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s5 |
| ; CHECK-MVE-NEXT: cset r0, lt |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: cset r0, lt |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s0, s12, s8 |
| ; CHECK-MVE-NEXT: vmovx.f16 s8, s1 |
| ; CHECK-MVE-NEXT: vcmp.f16 s8, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s9 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vmovx.f16 s8, s13 |
| ; CHECK-MVE-NEXT: vcmp.f16 s1, s5 |
| ; CHECK-MVE-NEXT: vins.f16 s0, s16 |
| ; CHECK-MVE-NEXT: cset r0, lt |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vmovx.f16 s8, s2 |
| ; CHECK-MVE-NEXT: cset r0, lt |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s1, s13, s9 |
| ; CHECK-MVE-NEXT: vins.f16 s1, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s6 |
| ; CHECK-MVE-NEXT: vcmp.f16 s8, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s10 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f16 s2, s6 |
| ; CHECK-MVE-NEXT: vmovx.f16 s8, s14 |
| ; CHECK-MVE-NEXT: vmovx.f16 s6, s3 |
| ; CHECK-MVE-NEXT: cset r0, lt |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: cset r0, lt |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s2, s14, s10 |
| ; CHECK-MVE-NEXT: vins.f16 s2, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s7 |
| ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s11 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vmovx.f16 s6, s15 |
| ; CHECK-MVE-NEXT: vcmp.f16 s3, s7 |
| ; CHECK-MVE-NEXT: cset r0, lt |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s4, s6, s4 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: cset r0, lt |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s3, s15, s11 |
| ; CHECK-MVE-NEXT: vins.f16 s3, s4 |
| ; CHECK-MVE-NEXT: vpop {d8, d9} |
| ; CHECK-MVE-NEXT: bx lr |
| ; |
| ; CHECK-MVEFP-LABEL: vcmp_ult_v8f16: |
| ; CHECK-MVEFP: @ %bb.0: @ %entry |
| ; CHECK-MVEFP-NEXT: vcmp.f16 lt, q0, q1 |
| ; CHECK-MVEFP-NEXT: vpsel q0, q2, q3 |
| ; CHECK-MVEFP-NEXT: bx lr |
| entry: |
| ; fcmp ult lowering: scalar MVE path uses per-lane cset on 'lt'; MVE.fp path uses the direct predicate 'vcmp.f16 lt, q0, q1' (no operand swap needed). |
| %c = fcmp ult <8 x half> %src, %src2 |
| %s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b |
| ret <8 x half> %s |
| } |
| |
| define arm_aapcs_vfpcc <8 x half> @vcmp_ule_v8f16(<8 x half> %src, <8 x half> %src2, <8 x half> %a, <8 x half> %b) { |
| ; CHECK-MVE-LABEL: vcmp_ule_v8f16: |
| ; CHECK-MVE: @ %bb.0: @ %entry |
| ; CHECK-MVE-NEXT: .vsave {d8, d9} |
| ; CHECK-MVE-NEXT: vpush {d8, d9} |
| ; CHECK-MVE-NEXT: vmovx.f16 s16, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s18, s0 |
| ; CHECK-MVE-NEXT: vcmp.f16 s18, s16 |
| ; CHECK-MVE-NEXT: vmovx.f16 s16, s8 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f16 s0, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s18, s12 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s5 |
| ; CHECK-MVE-NEXT: cset r0, le |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: cset r0, le |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s0, s12, s8 |
| ; CHECK-MVE-NEXT: vmovx.f16 s8, s1 |
| ; CHECK-MVE-NEXT: vcmp.f16 s8, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s9 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vmovx.f16 s8, s13 |
| ; CHECK-MVE-NEXT: vcmp.f16 s1, s5 |
| ; CHECK-MVE-NEXT: vins.f16 s0, s16 |
| ; CHECK-MVE-NEXT: cset r0, le |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vmovx.f16 s8, s2 |
| ; CHECK-MVE-NEXT: cset r0, le |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s1, s13, s9 |
| ; CHECK-MVE-NEXT: vins.f16 s1, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s6 |
| ; CHECK-MVE-NEXT: vcmp.f16 s8, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s10 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f16 s2, s6 |
| ; CHECK-MVE-NEXT: vmovx.f16 s8, s14 |
| ; CHECK-MVE-NEXT: vmovx.f16 s6, s3 |
| ; CHECK-MVE-NEXT: cset r0, le |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: cset r0, le |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s2, s14, s10 |
| ; CHECK-MVE-NEXT: vins.f16 s2, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s7 |
| ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s11 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vmovx.f16 s6, s15 |
| ; CHECK-MVE-NEXT: vcmp.f16 s3, s7 |
| ; CHECK-MVE-NEXT: cset r0, le |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s4, s6, s4 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: cset r0, le |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s3, s15, s11 |
| ; CHECK-MVE-NEXT: vins.f16 s3, s4 |
| ; CHECK-MVE-NEXT: vpop {d8, d9} |
| ; CHECK-MVE-NEXT: bx lr |
| ; |
| ; CHECK-MVEFP-LABEL: vcmp_ule_v8f16: |
| ; CHECK-MVEFP: @ %bb.0: @ %entry |
| ; CHECK-MVEFP-NEXT: vcmp.f16 le, q0, q1 |
| ; CHECK-MVEFP-NEXT: vpsel q0, q2, q3 |
| ; CHECK-MVEFP-NEXT: bx lr |
| entry: |
| ; fcmp ule lowering: scalar MVE path uses per-lane cset on 'le'; MVE.fp path uses the direct predicate 'vcmp.f16 le, q0, q1' (no operand swap needed). |
| %c = fcmp ule <8 x half> %src, %src2 |
| %s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b |
| ret <8 x half> %s |
| } |
| |
| define arm_aapcs_vfpcc <8 x half> @vcmp_ord_v8f16(<8 x half> %src, <8 x half> %src2, <8 x half> %a, <8 x half> %b) { |
| ; CHECK-MVE-LABEL: vcmp_ord_v8f16: |
| ; CHECK-MVE: @ %bb.0: @ %entry |
| ; CHECK-MVE-NEXT: .vsave {d8, d9} |
| ; CHECK-MVE-NEXT: vpush {d8, d9} |
| ; CHECK-MVE-NEXT: vmovx.f16 s16, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s18, s0 |
| ; CHECK-MVE-NEXT: vcmp.f16 s18, s16 |
| ; CHECK-MVE-NEXT: vmovx.f16 s16, s8 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f16 s0, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s18, s12 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s5 |
| ; CHECK-MVE-NEXT: cset r0, vc |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: cset r0, vc |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s0, s12, s8 |
| ; CHECK-MVE-NEXT: vmovx.f16 s8, s1 |
| ; CHECK-MVE-NEXT: vcmp.f16 s8, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s9 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vmovx.f16 s8, s13 |
| ; CHECK-MVE-NEXT: vcmp.f16 s1, s5 |
| ; CHECK-MVE-NEXT: vins.f16 s0, s16 |
| ; CHECK-MVE-NEXT: cset r0, vc |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vmovx.f16 s8, s2 |
| ; CHECK-MVE-NEXT: cset r0, vc |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s1, s13, s9 |
| ; CHECK-MVE-NEXT: vins.f16 s1, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s6 |
| ; CHECK-MVE-NEXT: vcmp.f16 s8, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s10 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f16 s2, s6 |
| ; CHECK-MVE-NEXT: vmovx.f16 s8, s14 |
| ; CHECK-MVE-NEXT: vmovx.f16 s6, s3 |
| ; CHECK-MVE-NEXT: cset r0, vc |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: cset r0, vc |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s2, s14, s10 |
| ; CHECK-MVE-NEXT: vins.f16 s2, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s7 |
| ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s11 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vmovx.f16 s6, s15 |
| ; CHECK-MVE-NEXT: vcmp.f16 s3, s7 |
| ; CHECK-MVE-NEXT: cset r0, vc |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s4, s6, s4 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: cset r0, vc |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s3, s15, s11 |
| ; CHECK-MVE-NEXT: vins.f16 s3, s4 |
| ; CHECK-MVE-NEXT: vpop {d8, d9} |
| ; CHECK-MVE-NEXT: bx lr |
| ; |
| ; CHECK-MVEFP-LABEL: vcmp_ord_v8f16: |
| ; CHECK-MVEFP: @ %bb.0: @ %entry |
| ; CHECK-MVEFP-NEXT: vpt.f16 le, q1, q0 |
| ; CHECK-MVEFP-NEXT: vcmpt.f16 lt, q0, q1 |
| ; CHECK-MVEFP-NEXT: vpsel q0, q3, q2 |
| ; CHECK-MVEFP-NEXT: bx lr |
| entry: |
| ; fcmp ord lowering: scalar path tests the FP overflow flag per lane ('vc' = ordered, i.e. neither operand NaN); MVE.fp path builds the inverse (unordered) predicate with a VPT block and swaps the vpsel operands (q3, q2) to compensate. |
| %c = fcmp ord <8 x half> %src, %src2 |
| %s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b |
| ret <8 x half> %s |
| } |
| |
| define arm_aapcs_vfpcc <8 x half> @vcmp_uno_v8f16(<8 x half> %src, <8 x half> %src2, <8 x half> %a, <8 x half> %b) { |
| ; CHECK-MVE-LABEL: vcmp_uno_v8f16: |
| ; CHECK-MVE: @ %bb.0: @ %entry |
| ; CHECK-MVE-NEXT: .vsave {d8, d9} |
| ; CHECK-MVE-NEXT: vpush {d8, d9} |
| ; CHECK-MVE-NEXT: vmovx.f16 s16, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s18, s0 |
| ; CHECK-MVE-NEXT: vcmp.f16 s18, s16 |
| ; CHECK-MVE-NEXT: vmovx.f16 s16, s8 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f16 s0, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s18, s12 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s5 |
| ; CHECK-MVE-NEXT: cset r0, vs |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: cset r0, vs |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s0, s12, s8 |
| ; CHECK-MVE-NEXT: vmovx.f16 s8, s1 |
| ; CHECK-MVE-NEXT: vcmp.f16 s8, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s9 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vmovx.f16 s8, s13 |
| ; CHECK-MVE-NEXT: vcmp.f16 s1, s5 |
| ; CHECK-MVE-NEXT: vins.f16 s0, s16 |
| ; CHECK-MVE-NEXT: cset r0, vs |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vmovx.f16 s8, s2 |
| ; CHECK-MVE-NEXT: cset r0, vs |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s1, s13, s9 |
| ; CHECK-MVE-NEXT: vins.f16 s1, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s6 |
| ; CHECK-MVE-NEXT: vcmp.f16 s8, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s10 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vcmp.f16 s2, s6 |
| ; CHECK-MVE-NEXT: vmovx.f16 s8, s14 |
| ; CHECK-MVE-NEXT: vmovx.f16 s6, s3 |
| ; CHECK-MVE-NEXT: cset r0, vs |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: cset r0, vs |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s2, s14, s10 |
| ; CHECK-MVE-NEXT: vins.f16 s2, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s7 |
| ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 |
| ; CHECK-MVE-NEXT: vmovx.f16 s4, s11 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: vmovx.f16 s6, s15 |
| ; CHECK-MVE-NEXT: vcmp.f16 s3, s7 |
| ; CHECK-MVE-NEXT: cset r0, vs |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s4, s6, s4 |
| ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr |
| ; CHECK-MVE-NEXT: cset r0, vs |
| ; CHECK-MVE-NEXT: cmp r0, #0 |
| ; CHECK-MVE-NEXT: vseleq.f16 s3, s15, s11 |
| ; CHECK-MVE-NEXT: vins.f16 s3, s4 |
| ; CHECK-MVE-NEXT: vpop {d8, d9} |
| ; CHECK-MVE-NEXT: bx lr |
| ; |
| ; CHECK-MVEFP-LABEL: vcmp_uno_v8f16: |
| ; CHECK-MVEFP: @ %bb.0: @ %entry |
| ; CHECK-MVEFP-NEXT: vpt.f16 le, q1, q0 |
| ; CHECK-MVEFP-NEXT: vcmpt.f16 lt, q0, q1 |
| ; CHECK-MVEFP-NEXT: vpsel q0, q2, q3 |
| ; CHECK-MVEFP-NEXT: bx lr |
| entry: |
| ; fcmp uno lowering: scalar path tests the FP overflow flag per lane ('vs' = unordered, i.e. at least one NaN); MVE.fp path uses the same VPT block as the 'ord' test but with the vpsel operands in the direct order (q2, q3). |
| %c = fcmp uno <8 x half> %src, %src2 |
| %s = select <8 x i1> %c, <8 x half> %a, <8 x half> %b |
| ret <8 x half> %s |
| } |