| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s |
| |
; sext(i8->i32) * sext(i8->i32), ashr #7, smin 127 is the VQDMULH.s8 pattern;
; the i32 add-reduction on top should then select VADDV.s8.
define arm_aapcs_vfpcc i32 @vqdmulh_v16i8(<16 x i8> %s0, <16 x i8> %s1) {
; CHECK-LABEL: vqdmulh_v16i8:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vqdmulh.s8 q0, q1, q0
; CHECK-NEXT: vaddv.s8 r0, q0
; CHECK-NEXT: bx lr
entry:
%l2 = sext <16 x i8> %s0 to <16 x i32>
%l5 = sext <16 x i8> %s1 to <16 x i32>
%l6 = mul nsw <16 x i32> %l5, %l2
%l7 = ashr <16 x i32> %l6, <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
%l8 = icmp slt <16 x i32> %l7, <i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127>
%l9 = select <16 x i1> %l8, <16 x i32> %l7, <16 x i32> <i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127>
%l10 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %l9)
ret i32 %l10
}
| |
; Same VQDMULH.s8 pattern as above, but truncated back to <16 x i8>; the whole
; sequence should fold to a single vqdmulh.s8 with no extra fixup.
define arm_aapcs_vfpcc <16 x i8> @vqdmulh_v16i8_b(<16 x i8> %s0, <16 x i8> %s1) {
; CHECK-LABEL: vqdmulh_v16i8_b:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vqdmulh.s8 q0, q1, q0
; CHECK-NEXT: bx lr
entry:
%l2 = sext <16 x i8> %s0 to <16 x i32>
%l5 = sext <16 x i8> %s1 to <16 x i32>
%l6 = mul nsw <16 x i32> %l5, %l2
%l7 = ashr <16 x i32> %l6, <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
%l8 = icmp slt <16 x i32> %l7, <i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127>
%l9 = select <16 x i1> %l8, <16 x i32> %l7, <16 x i32> <i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127>
%l10 = trunc <16 x i32> %l9 to <16 x i8>
ret <16 x i8> %l10
}
| |
; Half-width <8 x i8> variant: still selects vqdmulh.s8, with a vmovlb.s8 to
; re-sign-extend the i8 results into the lanes of the illegal <8 x i8> type.
define arm_aapcs_vfpcc <8 x i8> @vqdmulh_v8i8_b(<8 x i8> %s0, <8 x i8> %s1) {
; CHECK-LABEL: vqdmulh_v8i8_b:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vqdmulh.s8 q0, q1, q0
; CHECK-NEXT: vmovlb.s8 q0, q0
; CHECK-NEXT: bx lr
entry:
%l2 = sext <8 x i8> %s0 to <8 x i32>
%l5 = sext <8 x i8> %s1 to <8 x i32>
%l6 = mul nsw <8 x i32> %l5, %l2
%l7 = ashr <8 x i32> %l6, <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
%l8 = icmp slt <8 x i32> %l7, <i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127>
%l9 = select <8 x i1> %l8, <8 x i32> %l7, <8 x i32> <i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127>
%l10 = trunc <8 x i32> %l9 to <8 x i8>
ret <8 x i8> %l10
}
| |
; Quarter-width <4 x i8> variant: vqdmulh.s8 plus two vmovlb steps (s8 then
; s16) to sign-extend the results up through the illegal element type.
define arm_aapcs_vfpcc <4 x i8> @vqdmulh_v4i8_b(<4 x i8> %s0, <4 x i8> %s1) {
; CHECK-LABEL: vqdmulh_v4i8_b:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vqdmulh.s8 q0, q1, q0
; CHECK-NEXT: vmovlb.s8 q0, q0
; CHECK-NEXT: vmovlb.s16 q0, q0
; CHECK-NEXT: bx lr
entry:
%l2 = sext <4 x i8> %s0 to <4 x i32>
%l5 = sext <4 x i8> %s1 to <4 x i32>
%l6 = mul nsw <4 x i32> %l5, %l2
%l7 = ashr <4 x i32> %l6, <i32 7, i32 7, i32 7, i32 7>
%l8 = icmp slt <4 x i32> %l7, <i32 127, i32 127, i32 127, i32 127>
%l9 = select <4 x i1> %l8, <4 x i32> %l7, <4 x i32> <i32 127, i32 127, i32 127, i32 127>
%l10 = trunc <4 x i32> %l9 to <4 x i8>
ret <4 x i8> %l10
}
| |
; Double-width <32 x i8> variant: the combine should survive type splitting,
; giving one vqdmulh.s8 per 128-bit q-register half.
define arm_aapcs_vfpcc <32 x i8> @vqdmulh_v32i8_b(<32 x i8> %s0, <32 x i8> %s1) {
; CHECK-LABEL: vqdmulh_v32i8_b:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vqdmulh.s8 q0, q2, q0
; CHECK-NEXT: vqdmulh.s8 q1, q3, q1
; CHECK-NEXT: bx lr
entry:
%l2 = sext <32 x i8> %s0 to <32 x i32>
%l5 = sext <32 x i8> %s1 to <32 x i32>
%l6 = mul nsw <32 x i32> %l5, %l2
%l7 = ashr <32 x i32> %l6, <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
%l8 = icmp slt <32 x i32> %l7, <i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127>
%l9 = select <32 x i1> %l8, <32 x i32> %l7, <32 x i32> <i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127>
%l10 = trunc <32 x i32> %l9 to <32 x i8>
ret <32 x i8> %l10
}
| |
; i16 element version of the pattern (ashr #15, smin 32767): expects
; vqdmulh.s16 followed by vaddv.s16 for the i32 add-reduction.
define arm_aapcs_vfpcc i32 @vqdmulh_v8i16(<8 x i16> %s0, <8 x i16> %s1) {
; CHECK-LABEL: vqdmulh_v8i16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vqdmulh.s16 q0, q1, q0
; CHECK-NEXT: vaddv.s16 r0, q0
; CHECK-NEXT: bx lr
entry:
%l2 = sext <8 x i16> %s0 to <8 x i32>
%l5 = sext <8 x i16> %s1 to <8 x i32>
%l6 = mul nsw <8 x i32> %l5, %l2
%l7 = ashr <8 x i32> %l6, <i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15>
%l8 = icmp slt <8 x i32> %l7, <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767>
%l9 = select <8 x i1> %l8, <8 x i32> %l7, <8 x i32> <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767>
%l10 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %l9)
ret i32 %l10
}
| |
; i16 pattern truncated back to <8 x i16>: should fold to a lone vqdmulh.s16.
define arm_aapcs_vfpcc <8 x i16> @vqdmulh_v8i16_b(<8 x i16> %s0, <8 x i16> %s1) {
; CHECK-LABEL: vqdmulh_v8i16_b:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vqdmulh.s16 q0, q1, q0
; CHECK-NEXT: bx lr
entry:
%l2 = sext <8 x i16> %s0 to <8 x i32>
%l5 = sext <8 x i16> %s1 to <8 x i32>
%l6 = mul nsw <8 x i32> %l5, %l2
%l7 = ashr <8 x i32> %l6, <i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15>
%l8 = icmp slt <8 x i32> %l7, <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767>
%l9 = select <8 x i1> %l8, <8 x i32> %l7, <8 x i32> <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767>
%l10 = trunc <8 x i32> %l9 to <8 x i16>
ret <8 x i16> %l10
}
| |
; Half-width <4 x i16> variant: vqdmulh.s16 plus a vmovlb.s16 to re-sign-extend
; the lanes of the illegal <4 x i16> result type.
; (The icmp is named %l8 to match the %l2/%l5/%l6/%l7/%l8/%l9/%l10 naming used
; by every sibling test in this file; the rename does not affect codegen.)
define arm_aapcs_vfpcc <4 x i16> @vqdmulh_v4i16_b(<4 x i16> %s0, <4 x i16> %s1) {
; CHECK-LABEL: vqdmulh_v4i16_b:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vqdmulh.s16 q0, q1, q0
; CHECK-NEXT: vmovlb.s16 q0, q0
; CHECK-NEXT: bx lr
entry:
%l2 = sext <4 x i16> %s0 to <4 x i32>
%l5 = sext <4 x i16> %s1 to <4 x i32>
%l6 = mul nsw <4 x i32> %l5, %l2
%l7 = ashr <4 x i32> %l6, <i32 15, i32 15, i32 15, i32 15>
%l8 = icmp slt <4 x i32> %l7, <i32 32767, i32 32767, i32 32767, i32 32767>
%l9 = select <4 x i1> %l8, <4 x i32> %l7, <4 x i32> <i32 32767, i32 32767, i32 32767, i32 32767>
%l10 = trunc <4 x i32> %l9 to <4 x i16>
ret <4 x i16> %l10
}
| |
; Double-width <16 x i16> variant: one vqdmulh.s16 per q-register half after
; type splitting.
; (The icmp is named %l8 to match the %l2/%l5/%l6/%l7/%l8/%l9/%l10 naming used
; by every sibling test in this file; the rename does not affect codegen.)
define arm_aapcs_vfpcc <16 x i16> @vqdmulh_v16i16_b(<16 x i16> %s0, <16 x i16> %s1) {
; CHECK-LABEL: vqdmulh_v16i16_b:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vqdmulh.s16 q0, q2, q0
; CHECK-NEXT: vqdmulh.s16 q1, q3, q1
; CHECK-NEXT: bx lr
entry:
%l2 = sext <16 x i16> %s0 to <16 x i32>
%l5 = sext <16 x i16> %s1 to <16 x i32>
%l6 = mul nsw <16 x i32> %l5, %l2
%l7 = ashr <16 x i32> %l6, <i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15>
%l8 = icmp slt <16 x i32> %l7, <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767>
%l9 = select <16 x i1> %l8, <16 x i32> %l7, <16 x i32> <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767>
%l10 = trunc <16 x i32> %l9 to <16 x i16>
ret <16 x i16> %l10
}
| |
; Negative test: the intermediate type here is i22, not i32, so the shift/clamp
; does not saturate at the i16 boundary and must NOT be combined to vqdmulh
; (the expected code below contains no vqdmulh, only the expanded
; vmullb/vshl/vshr sequence with i22 sign-extension done as shl #10 + ashr #10).
define arm_aapcs_vfpcc <8 x i16> @vqdmulh_v8i16_c(<8 x i16> %s0, <8 x i16> %s1) {
; CHECK-LABEL: vqdmulh_v8i16_c:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: .pad #16
; CHECK-NEXT: sub sp, #16
; CHECK-NEXT: vmov.u16 r0, q0[6]
; CHECK-NEXT: vmov.u16 r1, q0[4]
; CHECK-NEXT: vmov q2[2], q2[0], r1, r0
; CHECK-NEXT: vmov.u16 r0, q0[7]
; CHECK-NEXT: vmov.u16 r1, q0[5]
; CHECK-NEXT: vmov.u16 r2, q0[0]
; CHECK-NEXT: vmov q2[3], q2[1], r1, r0
; CHECK-NEXT: vmov.u16 r0, q1[6]
; CHECK-NEXT: vmov.u16 r1, q1[4]
; CHECK-NEXT: vmov q3[2], q3[0], r1, r0
; CHECK-NEXT: vmov.u16 r0, q1[7]
; CHECK-NEXT: vmov.u16 r1, q1[5]
; CHECK-NEXT: vmov q3[3], q3[1], r1, r0
; CHECK-NEXT: mov r0, sp
; CHECK-NEXT: vmullb.s16 q2, q3, q2
; CHECK-NEXT: vmov.u16 r1, q0[2]
; CHECK-NEXT: vshl.i32 q2, q2, #10
; CHECK-NEXT: vshr.s32 q2, q2, #10
; CHECK-NEXT: vshr.s32 q2, q2, #15
; CHECK-NEXT: vstrh.32 q2, [r0, #8]
; CHECK-NEXT: vmov q2[2], q2[0], r2, r1
; CHECK-NEXT: vmov.u16 r1, q0[3]
; CHECK-NEXT: vmov.u16 r2, q0[1]
; CHECK-NEXT: vmov q2[3], q2[1], r2, r1
; CHECK-NEXT: vmov.u16 r1, q1[2]
; CHECK-NEXT: vmov.u16 r2, q1[0]
; CHECK-NEXT: vmov q0[2], q0[0], r2, r1
; CHECK-NEXT: vmov.u16 r1, q1[3]
; CHECK-NEXT: vmov.u16 r2, q1[1]
; CHECK-NEXT: vmov q0[3], q0[1], r2, r1
; CHECK-NEXT: vmullb.s16 q0, q0, q2
; CHECK-NEXT: vshl.i32 q0, q0, #10
; CHECK-NEXT: vshr.s32 q0, q0, #10
; CHECK-NEXT: vshr.s32 q0, q0, #15
; CHECK-NEXT: vstrh.32 q0, [r0]
; CHECK-NEXT: vldrw.u32 q0, [r0]
; CHECK-NEXT: add sp, #16
; CHECK-NEXT: bx lr
entry:
%l2 = sext <8 x i16> %s0 to <8 x i22>
%l5 = sext <8 x i16> %s1 to <8 x i22>
%l6 = mul nsw <8 x i22> %l5, %l2
%l7 = ashr <8 x i22> %l6, <i22 15, i22 15, i22 15, i22 15, i22 15, i22 15, i22 15, i22 15>
%l8 = icmp slt <8 x i22> %l7, <i22 32767, i22 32767, i22 32767, i22 32767, i22 32767, i22 32767, i22 32767, i22 32767>
%l9 = select <8 x i1> %l8, <8 x i22> %l7, <8 x i22> <i22 32767, i22 32767, i22 32767, i22 32767, i22 32767, i22 32767, i22 32767, i22 32767>
%l10 = trunc <8 x i22> %l9 to <8 x i16>
ret <8 x i16> %l10
}
| |
; The inputs are deinterleaved by shuffles and the result re-interleaved by the
; inverse shuffle; the shuffles cancel and the whole thing should still fold to
; a single vqdmulh.s16.
; NOTE(review): %l2 and %l5 are never used — presumably kept deliberately so
; that the source sexts have extra uses during combining; confirm before
; removing them, as deleting them may change one-use heuristics.
define arm_aapcs_vfpcc <8 x i16> @vqdmulh_v8i16_interleaved(<8 x i16> %s0, <8 x i16> %s1) {
; CHECK-LABEL: vqdmulh_v8i16_interleaved:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vqdmulh.s16 q0, q1, q0
; CHECK-NEXT: bx lr
entry:
%0 = shufflevector <8 x i16> %s0, <8 x i16> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 1, i32 3, i32 5, i32 7>
%1 = sext <8 x i16> %0 to <8 x i32>
%l2 = sext <8 x i16> %s0 to <8 x i32>
%2 = shufflevector <8 x i16> %s1, <8 x i16> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 1, i32 3, i32 5, i32 7>
%3 = sext <8 x i16> %2 to <8 x i32>
%l5 = sext <8 x i16> %s1 to <8 x i32>
%l6 = mul nsw <8 x i32> %3, %1
%l7 = ashr <8 x i32> %l6, <i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15>
%l8 = icmp slt <8 x i32> %l7, <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767>
%l9 = select <8 x i1> %l8, <8 x i32> %l7, <8 x i32> <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767>
%l10 = trunc <8 x i32> %l9 to <8 x i16>
%4 = shufflevector <8 x i16> %l10, <8 x i16> undef, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
ret <8 x i16> %4
}
| |
; Two vqdmulh patterns on the even/odd deinterleaved halves of %s1, with the
; results re-interleaved: selects two vqdmulh.s16 (one on a vrev32.16 of %s1
; for the odd lanes) merged back together with vmovnt.i32.
define arm_aapcs_vfpcc <8 x i16> @vqdmulh_v8i16_interleaved2(<4 x i32> %s0a, <8 x i16> %s1) {
; CHECK-LABEL: vqdmulh_v8i16_interleaved2:
; CHECK: @ %bb.0:
; CHECK-NEXT: vqdmulh.s16 q2, q1, q0
; CHECK-NEXT: vrev32.16 q1, q1
; CHECK-NEXT: vqdmulh.s16 q0, q1, q0
; CHECK-NEXT: vmovnt.i32 q2, q0
; CHECK-NEXT: vmov q0, q2
; CHECK-NEXT: bx lr
%s0 = trunc <4 x i32> %s0a to <4 x i16>
%strided.vec = shufflevector <8 x i16> %s1, <8 x i16> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
%strided.vec44 = shufflevector <8 x i16> %s1, <8 x i16> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
%l7 = sext <4 x i16> %strided.vec to <4 x i32>
%l8 = sext <4 x i16> %s0 to <4 x i32>
%l9 = mul nsw <4 x i32> %l7, %l8
%l10 = ashr <4 x i32> %l9, <i32 15, i32 15, i32 15, i32 15>
%l11 = icmp slt <4 x i32> %l10, <i32 32767, i32 32767, i32 32767, i32 32767>
%l12 = select <4 x i1> %l11, <4 x i32> %l10, <4 x i32> <i32 32767, i32 32767, i32 32767, i32 32767>
%l13 = trunc <4 x i32> %l12 to <4 x i16>
%l14 = sext <4 x i16> %strided.vec44 to <4 x i32>
%l15 = mul nsw <4 x i32> %l14, %l8
%l16 = ashr <4 x i32> %l15, <i32 15, i32 15, i32 15, i32 15>
%l17 = icmp slt <4 x i32> %l16, <i32 32767, i32 32767, i32 32767, i32 32767>
%l18 = select <4 x i1> %l17, <4 x i32> %l16, <4 x i32> <i32 32767, i32 32767, i32 32767, i32 32767>
%l19 = trunc <4 x i32> %l18 to <4 x i16>
%interleaved.vec = shufflevector <4 x i16> %l13, <4 x i16> %l19, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
ret <8 x i16> %interleaved.vec
}
| |
; i32 element version via an i64 intermediate (ashr #31, smin 2^31-1):
; vqdmulh.s32 followed by vaddlv.s32 for the long (i64) add-reduction.
define arm_aapcs_vfpcc i64 @vqdmulh_v4i32(<4 x i32> %s0, <4 x i32> %s1) {
; CHECK-LABEL: vqdmulh_v4i32:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vqdmulh.s32 q0, q1, q0
; CHECK-NEXT: vaddlv.s32 r0, r1, q0
; CHECK-NEXT: bx lr
entry:
%l2 = sext <4 x i32> %s0 to <4 x i64>
%l5 = sext <4 x i32> %s1 to <4 x i64>
%l6 = mul nsw <4 x i64> %l5, %l2
%l7 = ashr <4 x i64> %l6, <i64 31, i64 31, i64 31, i64 31>
%l8 = icmp slt <4 x i64> %l7, <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>
%l9 = select <4 x i1> %l8, <4 x i64> %l7, <4 x i64> <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>
%l10 = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %l9)
ret i64 %l10
}
| |
; i32 pattern truncated back to <4 x i32>: should fold to a lone vqdmulh.s32.
define arm_aapcs_vfpcc <4 x i32> @vqdmulh_v4i32_b(<4 x i32> %s0, <4 x i32> %s1) {
; CHECK-LABEL: vqdmulh_v4i32_b:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vqdmulh.s32 q0, q1, q0
; CHECK-NEXT: bx lr
entry:
%l2 = sext <4 x i32> %s0 to <4 x i64>
%l5 = sext <4 x i32> %s1 to <4 x i64>
%l6 = mul nsw <4 x i64> %l5, %l2
%l7 = ashr <4 x i64> %l6, <i64 31, i64 31, i64 31, i64 31>
%l8 = icmp slt <4 x i64> %l7, <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>
%l9 = select <4 x i1> %l8, <4 x i64> %l7, <4 x i64> <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>
%l10 = trunc <4 x i64> %l9 to <4 x i32>
ret <4 x i32> %l10
}
| |
; Half-width <2 x i32> variant: still selects vqdmulh.s32, then the two lanes
; are sign-extended back to the illegal <2 x i32>-in-<2 x i64> layout with
; scalar asrs #31 of each element.
define arm_aapcs_vfpcc <2 x i32> @vqdmulh_v2i32_b(<2 x i32> %s0, <2 x i32> %s1) {
; CHECK-LABEL: vqdmulh_v2i32_b:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vqdmulh.s32 q0, q1, q0
; CHECK-NEXT: vmov r0, s2
; CHECK-NEXT: vmov r1, s0
; CHECK-NEXT: vmov q0[2], q0[0], r1, r0
; CHECK-NEXT: asrs r0, r0, #31
; CHECK-NEXT: asrs r1, r1, #31
; CHECK-NEXT: vmov q0[3], q0[1], r1, r0
; CHECK-NEXT: bx lr
entry:
%l2 = sext <2 x i32> %s0 to <2 x i64>
%l5 = sext <2 x i32> %s1 to <2 x i64>
%l6 = mul nsw <2 x i64> %l5, %l2
%l7 = ashr <2 x i64> %l6, <i64 31, i64 31>
%l8 = icmp slt <2 x i64> %l7, <i64 2147483647, i64 2147483647>
%l9 = select <2 x i1> %l8, <2 x i64> %l7, <2 x i64> <i64 2147483647, i64 2147483647>
%l10 = trunc <2 x i64> %l9 to <2 x i32>
ret <2 x i32> %l10
}
| |
; Double-width <8 x i32> variant: one vqdmulh.s32 per q-register half after
; type splitting.
define arm_aapcs_vfpcc <8 x i32> @vqdmulh_v8i32_b(<8 x i32> %s0, <8 x i32> %s1) {
; CHECK-LABEL: vqdmulh_v8i32_b:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vqdmulh.s32 q0, q2, q0
; CHECK-NEXT: vqdmulh.s32 q1, q3, q1
; CHECK-NEXT: bx lr
entry:
%l2 = sext <8 x i32> %s0 to <8 x i64>
%l5 = sext <8 x i32> %s1 to <8 x i64>
%l6 = mul nsw <8 x i64> %l5, %l2
%l7 = ashr <8 x i64> %l6, <i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31>
%l8 = icmp slt <8 x i64> %l7, <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>
%l9 = select <8 x i1> %l8, <8 x i64> %l7, <8 x i64> <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>
%l10 = trunc <8 x i64> %l9 to <8 x i32>
ret <8 x i32> %l10
}
| |
; Quad-width <16 x i32> variant: four vqdmulh.s32, with the second operand's
; upper q-registers reloaded from the stack (they exceed the q0-q3 argument
; registers of the arm_aapcs_vfpcc convention).
define arm_aapcs_vfpcc <16 x i32> @vqdmulh_v16i32_b(<16 x i32> %s0, <16 x i32> %s1) {
; CHECK-LABEL: vqdmulh_v16i32_b:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: .vsave {d8, d9}
; CHECK-NEXT: vpush {d8, d9}
; CHECK-NEXT: add r0, sp, #16
; CHECK-NEXT: vldrw.u32 q4, [r0]
; CHECK-NEXT: add r0, sp, #32
; CHECK-NEXT: vqdmulh.s32 q0, q4, q0
; CHECK-NEXT: vldrw.u32 q4, [r0]
; CHECK-NEXT: add r0, sp, #48
; CHECK-NEXT: vqdmulh.s32 q1, q4, q1
; CHECK-NEXT: vldrw.u32 q4, [r0]
; CHECK-NEXT: add r0, sp, #64
; CHECK-NEXT: vqdmulh.s32 q2, q4, q2
; CHECK-NEXT: vldrw.u32 q4, [r0]
; CHECK-NEXT: vqdmulh.s32 q3, q4, q3
; CHECK-NEXT: vpop {d8, d9}
; CHECK-NEXT: bx lr
entry:
%l2 = sext <16 x i32> %s0 to <16 x i64>
%l5 = sext <16 x i32> %s1 to <16 x i64>
%l6 = mul nsw <16 x i64> %l5, %l2
%l7 = ashr <16 x i64> %l6, <i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31>
%l8 = icmp slt <16 x i64> %l7, <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>
%l9 = select <16 x i1> %l8, <16 x i64> %l7, <16 x i64> <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>
%l10 = trunc <16 x i64> %l9 to <16 x i32>
ret <16 x i32> %l10
}
| |
| |
| |
; Loop form of the i8 pattern (vectorizer-style output, 1024 elements in steps
; of 16): the body should become a tight hardware loop (le) around a single
; vldrb/vldrb/vqdmulh.s8/vstrb sequence, with lr preset to 1024/16 = 64 trips.
define void @vqdmulh_loop_i8(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, i32 %n) local_unnamed_addr #0 {
; CHECK-LABEL: vqdmulh_loop_i8:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: .save {r7, lr}
; CHECK-NEXT: push {r7, lr}
; CHECK-NEXT: mov.w lr, #64
; CHECK-NEXT: .LBB17_1: @ %vector.body
; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vldrb.u8 q0, [r0], #16
; CHECK-NEXT: vldrb.u8 q1, [r1], #16
; CHECK-NEXT: vqdmulh.s8 q0, q1, q0
; CHECK-NEXT: vstrb.8 q0, [r2], #16
; CHECK-NEXT: le lr, .LBB17_1
; CHECK-NEXT: @ %bb.2: @ %for.cond.cleanup
; CHECK-NEXT: pop {r7, pc}
entry:
br label %vector.body

vector.body: ; preds = %vector.body, %entry
%index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
%0 = getelementptr inbounds i8, ptr %x, i32 %index
%wide.load = load <16 x i8>, ptr %0, align 1
%1 = sext <16 x i8> %wide.load to <16 x i32>
%2 = getelementptr inbounds i8, ptr %y, i32 %index
%wide.load26 = load <16 x i8>, ptr %2, align 1
%3 = sext <16 x i8> %wide.load26 to <16 x i32>
%4 = mul nsw <16 x i32> %3, %1
%5 = ashr <16 x i32> %4, <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
%6 = icmp slt <16 x i32> %5, <i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127>
%7 = select <16 x i1> %6, <16 x i32> %5, <16 x i32> <i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127>
%8 = trunc <16 x i32> %7 to <16 x i8>
%9 = getelementptr inbounds i8, ptr %z, i32 %index
store <16 x i8> %8, ptr %9, align 1
%index.next = add i32 %index, 16
%10 = icmp eq i32 %index.next, 1024
br i1 %10, label %for.cond.cleanup, label %vector.body

for.cond.cleanup: ; preds = %vector.body
ret void
}
| |
; Loop form of the i16 pattern (1024 elements in steps of 8): expects a
; hardware loop with lr = 1024/8 = 128 trips around vldrh/vqdmulh.s16/vstrb.
define void @vqdmulh_loop_i16(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, i32 %n) {
; CHECK-LABEL: vqdmulh_loop_i16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: .save {r7, lr}
; CHECK-NEXT: push {r7, lr}
; CHECK-NEXT: mov.w lr, #128
; CHECK-NEXT: .LBB18_1: @ %vector.body
; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vldrh.u16 q0, [r0], #16
; CHECK-NEXT: vldrh.u16 q1, [r1], #16
; CHECK-NEXT: vqdmulh.s16 q0, q1, q0
; CHECK-NEXT: vstrb.8 q0, [r2], #16
; CHECK-NEXT: le lr, .LBB18_1
; CHECK-NEXT: @ %bb.2: @ %for.cond.cleanup
; CHECK-NEXT: pop {r7, pc}
entry:
br label %vector.body

vector.body: ; preds = %vector.body, %entry
%index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
%0 = getelementptr inbounds i16, ptr %x, i32 %index
%wide.load = load <8 x i16>, ptr %0, align 2
%1 = sext <8 x i16> %wide.load to <8 x i32>
%2 = getelementptr inbounds i16, ptr %y, i32 %index
%wide.load30 = load <8 x i16>, ptr %2, align 2
%3 = sext <8 x i16> %wide.load30 to <8 x i32>
%4 = mul nsw <8 x i32> %3, %1
%5 = ashr <8 x i32> %4, <i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15>
%6 = icmp slt <8 x i32> %5, <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767>
%7 = select <8 x i1> %6, <8 x i32> %5, <8 x i32> <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767>
%8 = trunc <8 x i32> %7 to <8 x i16>
%9 = getelementptr inbounds i16, ptr %z, i32 %index
store <8 x i16> %8, ptr %9, align 2
%index.next = add i32 %index, 8
%10 = icmp eq i32 %index.next, 1024
br i1 %10, label %for.cond.cleanup, label %vector.body

for.cond.cleanup: ; preds = %vector.body
ret void
}
| |
; Loop form of the i32 pattern (1024 elements in steps of 4): expects a
; hardware loop with lr = 1024/4 = 256 trips around vldrw/vqdmulh.s32/vstrb.
define void @vqdmulh_loop_i32(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, i32 %n) {
; CHECK-LABEL: vqdmulh_loop_i32:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: .save {r7, lr}
; CHECK-NEXT: push {r7, lr}
; CHECK-NEXT: mov.w lr, #256
; CHECK-NEXT: .LBB19_1: @ %vector.body
; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vldrw.u32 q0, [r0], #16
; CHECK-NEXT: vldrw.u32 q1, [r1], #16
; CHECK-NEXT: vqdmulh.s32 q0, q1, q0
; CHECK-NEXT: vstrb.8 q0, [r2], #16
; CHECK-NEXT: le lr, .LBB19_1
; CHECK-NEXT: @ %bb.2: @ %for.cond.cleanup
; CHECK-NEXT: pop {r7, pc}
entry:
br label %vector.body

vector.body: ; preds = %vector.body, %entry
%index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
%0 = getelementptr inbounds i32, ptr %x, i32 %index
%wide.load = load <4 x i32>, ptr %0, align 4
%1 = sext <4 x i32> %wide.load to <4 x i64>
%2 = getelementptr inbounds i32, ptr %y, i32 %index
%wide.load30 = load <4 x i32>, ptr %2, align 4
%3 = sext <4 x i32> %wide.load30 to <4 x i64>
%4 = mul nsw <4 x i64> %3, %1
%5 = ashr <4 x i64> %4, <i64 31, i64 31, i64 31, i64 31>
%6 = icmp slt <4 x i64> %5, <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>
%7 = select <4 x i1> %6, <4 x i64> %5, <4 x i64> <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>
%8 = trunc <4 x i64> %7 to <4 x i32>
%9 = getelementptr inbounds i32, ptr %z, i32 %index
store <4 x i32> %8, ptr %9, align 4
%index.next = add i32 %index, 4
%10 = icmp eq i32 %index.next, 1024
br i1 %10, label %for.cond.cleanup, label %vector.body

for.cond.cleanup: ; preds = %vector.body
ret void
}
| |
; Not a vqdmulh pattern: an i128 smin/smax clamp (upper bound 2^64) around an
; fptosi of doubles. Checks this is scalarized via two __fixdfti libcalls with
; the clamps done in GPR compare/select sequences, not miscompiled into a
; vector saturating op.
define <2 x i64> @large_i128(<2 x double> %x) {
; CHECK-LABEL: large_i128:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, lr}
; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, lr}
; CHECK-NEXT: .pad #4
; CHECK-NEXT: sub sp, #4
; CHECK-NEXT: mov r8, r3
; CHECK-NEXT: mov r5, r2
; CHECK-NEXT: bl __fixdfti
; CHECK-NEXT: subs r7, r2, #1
; CHECK-NEXT: mov.w r9, #1
; CHECK-NEXT: sbcs r7, r3, #0
; CHECK-NEXT: mov.w r4, #0
; CHECK-NEXT: cset r7, lt
; CHECK-NEXT: cmp r7, #0
; CHECK-NEXT: csel r0, r0, r7, ne
; CHECK-NEXT: csel r3, r3, r7, ne
; CHECK-NEXT: csel r2, r2, r9, ne
; CHECK-NEXT: csel r1, r1, r7, ne
; CHECK-NEXT: rsbs r7, r0, #0
; CHECK-NEXT: sbcs.w r7, r4, r1
; CHECK-NEXT: sbcs.w r2, r4, r2
; CHECK-NEXT: sbcs.w r2, r4, r3
; CHECK-NEXT: cset r2, lt
; CHECK-NEXT: cmp r2, #0
; CHECK-NEXT: csel r6, r0, r2, ne
; CHECK-NEXT: csel r7, r1, r2, ne
; CHECK-NEXT: mov r0, r5
; CHECK-NEXT: mov r1, r8
; CHECK-NEXT: bl __fixdfti
; CHECK-NEXT: subs r5, r2, #1
; CHECK-NEXT: sbcs r5, r3, #0
; CHECK-NEXT: cset r5, lt
; CHECK-NEXT: cmp r5, #0
; CHECK-NEXT: csel r0, r0, r5, ne
; CHECK-NEXT: csel r3, r3, r5, ne
; CHECK-NEXT: csel r2, r2, r9, ne
; CHECK-NEXT: csel r1, r1, r5, ne
; CHECK-NEXT: rsbs r5, r0, #0
; CHECK-NEXT: sbcs.w r5, r4, r1
; CHECK-NEXT: sbcs.w r2, r4, r2
; CHECK-NEXT: sbcs.w r2, r4, r3
; CHECK-NEXT: cset r3, lt
; CHECK-NEXT: cmp r3, #0
; CHECK-NEXT: csel r2, r0, r3, ne
; CHECK-NEXT: csel r3, r1, r3, ne
; CHECK-NEXT: mov r0, r6
; CHECK-NEXT: mov r1, r7
; CHECK-NEXT: add sp, #4
; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, pc}
entry:
%conv = fptosi <2 x double> %x to <2 x i128>
; 18446744073709551616 = 2^64: clamp to [0, 2^64] before truncating to i64.
%0 = icmp slt <2 x i128> %conv, <i128 18446744073709551616, i128 18446744073709551616>
%spec.store.select = select <2 x i1> %0, <2 x i128> %conv, <2 x i128> <i128 18446744073709551616, i128 18446744073709551616>
%1 = icmp sgt <2 x i128> %spec.store.select, zeroinitializer
%spec.store.select7 = select <2 x i1> %1, <2 x i128> %spec.store.select, <2 x i128> zeroinitializer
%conv6 = trunc <2 x i128> %spec.store.select7 to <2 x i64>
ret <2 x i64> %conv6
}
| |
| declare i64 @llvm.vector.reduce.add.v4i64(<4 x i64>) |
| declare i32 @llvm.vector.reduce.add.v8i32(<8 x i32>) |
| declare i32 @llvm.vector.reduce.add.v16i32(<16 x i32>) |