; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=thumbv8.1m.main-none-eabi -mattr=+mve.fp -verify-machineinstrs -o - %s | FileCheck %s

declare <8 x half> @llvm.arm.mve.vcmlaq.v8f16(i32, <8 x half>, <8 x half>, <8 x half>)
declare <4 x float> @llvm.arm.mve.vcmlaq.v4f32(i32, <4 x float>, <4 x float>, <4 x float>)
declare <8 x half> @llvm.arm.mve.vcmulq.v8f16(i32, <8 x half>, <8 x half>)
declare <4 x float> @llvm.arm.mve.vcmulq.v4f32(i32, <4 x float>, <4 x float>)

| |
; A vcmla with a zero accumulator followed by a reassociable (fast) fadd of %a
; should fold so %a becomes the vcmla accumulator directly: a single
; vcmla.f32 q0, q1, q2 with rotation #0 (from the i32 0 rotation operand),
; and no separate vadd or zeroing of a scratch register.
define arm_aapcs_vfpcc <4 x float> @reassoc_f32x4(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
; CHECK-LABEL: reassoc_f32x4:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vcmla.f32 q0, q1, q2, #0
; CHECK-NEXT: bx lr
entry:
  %d = tail call <4 x float> @llvm.arm.mve.vcmlaq.v4f32(i32 0, <4 x float> zeroinitializer, <4 x float> %b, <4 x float> %c)
  %res = fadd fast <4 x float> %d, %a
  ret <4 x float> %res
}
| |
; Same fold as reassoc_f32x4, but with the fadd operands commuted
; (%a + %d instead of %d + %a) and rotation operand i32 1, which selects
; the #90 rotation in the emitted vcmla.
define arm_aapcs_vfpcc <4 x float> @reassoc_c_f32x4(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
; CHECK-LABEL: reassoc_c_f32x4:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vcmla.f32 q0, q1, q2, #90
; CHECK-NEXT: bx lr
entry:
  %d = tail call <4 x float> @llvm.arm.mve.vcmlaq.v4f32(i32 1, <4 x float> zeroinitializer, <4 x float> %b, <4 x float> %c)
  %res = fadd fast <4 x float> %a, %d
  ret <4 x float> %res
}
| |
; Half-precision (<8 x half>) variant of the zero-accumulator vcmla + fast
; fadd fold; rotation operand i32 2 selects #180 in the emitted vcmla.f16.
define arm_aapcs_vfpcc <8 x half> @reassoc_f16x4(<8 x half> %a, <8 x half> %b, <8 x half> %c) {
; CHECK-LABEL: reassoc_f16x4:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vcmla.f16 q0, q1, q2, #180
; CHECK-NEXT: bx lr
entry:
  %d = tail call <8 x half> @llvm.arm.mve.vcmlaq.v8f16(i32 2, <8 x half> zeroinitializer, <8 x half> %b, <8 x half> %c)
  %res = fadd fast <8 x half> %d, %a
  ret <8 x half> %res
}
| |
; Half-precision variant with commuted fadd operands (%a + %d); rotation
; operand i32 3 selects #270 in the emitted vcmla.f16.
define arm_aapcs_vfpcc <8 x half> @reassoc_c_f16x4(<8 x half> %a, <8 x half> %b, <8 x half> %c) {
; CHECK-LABEL: reassoc_c_f16x4:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vcmla.f16 q0, q1, q2, #270
; CHECK-NEXT: bx lr
entry:
  %d = tail call <8 x half> @llvm.arm.mve.vcmlaq.v8f16(i32 3, <8 x half> zeroinitializer, <8 x half> %b, <8 x half> %c)
  %res = fadd fast <8 x half> %a, %d
  ret <8 x half> %res
}
| |
; Negative test: without the `fast` flag on the fadd, the reassociation is
; not legal, so the fold must NOT happen. Expected codegen keeps the zeroed
; scratch accumulator (vmov.i32 q3, #0x0), the vcmla into q3, and a separate
; vadd.f32 to combine with %a.
define arm_aapcs_vfpcc <4 x float> @reassoc_nonfast_f32x4(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
; CHECK-LABEL: reassoc_nonfast_f32x4:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov.i32 q3, #0x0
; CHECK-NEXT: vcmla.f32 q3, q1, q2, #0
; CHECK-NEXT: vadd.f32 q0, q3, q0
; CHECK-NEXT: bx lr
entry:
  %d = tail call <4 x float> @llvm.arm.mve.vcmlaq.v4f32(i32 0, <4 x float> zeroinitializer, <4 x float> %b, <4 x float> %c)
  %res = fadd <4 x float> %d, %a
  ret <4 x float> %res
}
| |
| |
| |
; A vcmul (no accumulator) followed by a fast fadd of %a should combine into
; a single accumulating vcmla.f32 with %a in q0 as the accumulator;
; rotation operand i32 0 selects #0.
define arm_aapcs_vfpcc <4 x float> @muladd_f32x4(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
; CHECK-LABEL: muladd_f32x4:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vcmla.f32 q0, q1, q2, #0
; CHECK-NEXT: bx lr
entry:
  %d = tail call <4 x float> @llvm.arm.mve.vcmulq.v4f32(i32 0, <4 x float> %b, <4 x float> %c)
  %res = fadd fast <4 x float> %d, %a
  ret <4 x float> %res
}
| |
; Same vcmul + fast fadd fold, with commuted fadd operands (%a + %d) and
; rotation operand i32 1, which selects #90 in the emitted vcmla.
define arm_aapcs_vfpcc <4 x float> @muladd_c_f32x4(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
; CHECK-LABEL: muladd_c_f32x4:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vcmla.f32 q0, q1, q2, #90
; CHECK-NEXT: bx lr
entry:
  %d = tail call <4 x float> @llvm.arm.mve.vcmulq.v4f32(i32 1, <4 x float> %b, <4 x float> %c)
  %res = fadd fast <4 x float> %a, %d
  ret <4 x float> %res
}
| |
; Half-precision (<8 x half>) variant of the vcmul + fast fadd fold;
; rotation operand i32 2 selects #180 in the emitted vcmla.f16.
define arm_aapcs_vfpcc <8 x half> @muladd_f16x4(<8 x half> %a, <8 x half> %b, <8 x half> %c) {
; CHECK-LABEL: muladd_f16x4:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vcmla.f16 q0, q1, q2, #180
; CHECK-NEXT: bx lr
entry:
  %d = tail call <8 x half> @llvm.arm.mve.vcmulq.v8f16(i32 2, <8 x half> %b, <8 x half> %c)
  %res = fadd fast <8 x half> %d, %a
  ret <8 x half> %res
}
| |
; Half-precision variant with commuted fadd operands (%a + %d); rotation
; operand i32 3 selects #270 in the emitted vcmla.f16.
define arm_aapcs_vfpcc <8 x half> @muladd_c_f16x4(<8 x half> %a, <8 x half> %b, <8 x half> %c) {
; CHECK-LABEL: muladd_c_f16x4:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vcmla.f16 q0, q1, q2, #270
; CHECK-NEXT: bx lr
entry:
  %d = tail call <8 x half> @llvm.arm.mve.vcmulq.v8f16(i32 3, <8 x half> %b, <8 x half> %c)
  %res = fadd fast <8 x half> %a, %d
  ret <8 x half> %res
}
| |
; Negative test: without the `fast` flag on the fadd, the vcmul + fadd pair
; must NOT be combined into an accumulating vcmla. Expected codegen keeps a
; separate vcmul into a scratch register followed by an explicit vadd.f32.
define arm_aapcs_vfpcc <4 x float> @muladd_nonfast_f32x4(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
; CHECK-LABEL: muladd_nonfast_f32x4:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vcmul.f32 q3, q1, q2, #0
; CHECK-NEXT: vadd.f32 q0, q3, q0
; CHECK-NEXT: bx lr
entry:
  %d = tail call <4 x float> @llvm.arm.mve.vcmulq.v4f32(i32 0, <4 x float> %b, <4 x float> %c)
  %res = fadd <4 x float> %d, %a
  ret <4 x float> %res
}