| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+experimental-v,+experimental-zfh,+f,+d -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32 |
| ; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+experimental-v,+experimental-zfh,+f,+d -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64 |
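
; This file checks lowering of the llvm.vector.reduce.fadd/fmin/fmax
; intrinsics for fixed-length vectors. fadd reductions carrying the
; 'reassoc' flag should select the unordered vfredusum.vs, seeding the
; accumulator with a constant-pool value (presumably the fadd neutral
; element -0.0) and folding in the scalar start value afterwards with a
; scalar fadd; ordered fadd reductions should select vfredosum.vs,
; splatting the start value into the scalar operand with vfmv.v.f.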
| |
| declare half @llvm.vector.reduce.fadd.v1f16(half, <1 x half>) |
| |
| define half @vreduce_fadd_v1f16(<1 x half>* %x, half %s) { |
| ; CHECK-LABEL: vreduce_fadd_v1f16: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vle16.v v8, (a0) |
| ; CHECK-NEXT: vfmv.f.s ft0, v8 |
| ; CHECK-NEXT: fadd.h fa0, fa0, ft0 |
| ; CHECK-NEXT: ret |
| %v = load <1 x half>, <1 x half>* %x |
| %red = call reassoc half @llvm.vector.reduce.fadd.v1f16(half %s, <1 x half> %v) |
| ret half %red |
| } |
| |
| define half @vreduce_ord_fadd_v1f16(<1 x half>* %x, half %s) { |
| ; CHECK-LABEL: vreduce_ord_fadd_v1f16: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vle16.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v9, fa0 |
| ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu |
| ; CHECK-NEXT: vfredosum.vs v8, v8, v9 |
| ; CHECK-NEXT: vfmv.f.s fa0, v8 |
| ; CHECK-NEXT: ret |
| %v = load <1 x half>, <1 x half>* %x |
| %red = call half @llvm.vector.reduce.fadd.v1f16(half %s, <1 x half> %v) |
| ret half %red |
| } |
| |
| declare half @llvm.vector.reduce.fadd.v2f16(half, <2 x half>) |
| |
| define half @vreduce_fadd_v2f16(<2 x half>* %x, half %s) { |
| ; CHECK-LABEL: vreduce_fadd_v2f16: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: lui a1, %hi(.LCPI2_0) |
| ; CHECK-NEXT: flh ft0, %lo(.LCPI2_0)(a1) |
| ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu |
| ; CHECK-NEXT: vle16.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v9, ft0 |
| ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu |
| ; CHECK-NEXT: vfredusum.vs v8, v8, v9 |
| ; CHECK-NEXT: vfmv.f.s ft0, v8 |
| ; CHECK-NEXT: fadd.h fa0, fa0, ft0 |
| ; CHECK-NEXT: ret |
| %v = load <2 x half>, <2 x half>* %x |
| %red = call reassoc half @llvm.vector.reduce.fadd.v2f16(half %s, <2 x half> %v) |
| ret half %red |
| } |
| |
| define half @vreduce_ord_fadd_v2f16(<2 x half>* %x, half %s) { |
| ; CHECK-LABEL: vreduce_ord_fadd_v2f16: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu |
| ; CHECK-NEXT: vle16.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v9, fa0 |
| ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu |
| ; CHECK-NEXT: vfredosum.vs v8, v8, v9 |
| ; CHECK-NEXT: vfmv.f.s fa0, v8 |
| ; CHECK-NEXT: ret |
| %v = load <2 x half>, <2 x half>* %x |
| %red = call half @llvm.vector.reduce.fadd.v2f16(half %s, <2 x half> %v) |
| ret half %red |
| } |
| |
| declare half @llvm.vector.reduce.fadd.v4f16(half, <4 x half>) |
| |
| define half @vreduce_fadd_v4f16(<4 x half>* %x, half %s) { |
| ; CHECK-LABEL: vreduce_fadd_v4f16: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: lui a1, %hi(.LCPI4_0) |
| ; CHECK-NEXT: flh ft0, %lo(.LCPI4_0)(a1) |
| ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu |
| ; CHECK-NEXT: vle16.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v9, ft0 |
| ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu |
| ; CHECK-NEXT: vfredusum.vs v8, v8, v9 |
| ; CHECK-NEXT: vfmv.f.s ft0, v8 |
| ; CHECK-NEXT: fadd.h fa0, fa0, ft0 |
| ; CHECK-NEXT: ret |
| %v = load <4 x half>, <4 x half>* %x |
| %red = call reassoc half @llvm.vector.reduce.fadd.v4f16(half %s, <4 x half> %v) |
| ret half %red |
| } |
| |
| define half @vreduce_ord_fadd_v4f16(<4 x half>* %x, half %s) { |
| ; CHECK-LABEL: vreduce_ord_fadd_v4f16: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu |
| ; CHECK-NEXT: vle16.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v9, fa0 |
| ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu |
| ; CHECK-NEXT: vfredosum.vs v8, v8, v9 |
| ; CHECK-NEXT: vfmv.f.s fa0, v8 |
| ; CHECK-NEXT: ret |
| %v = load <4 x half>, <4 x half>* %x |
| %red = call half @llvm.vector.reduce.fadd.v4f16(half %s, <4 x half> %v) |
| ret half %red |
| } |
| |
| declare half @llvm.vector.reduce.fadd.v8f16(half, <8 x half>) |
| |
| define half @vreduce_fadd_v8f16(<8 x half>* %x, half %s) { |
| ; CHECK-LABEL: vreduce_fadd_v8f16: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: lui a1, %hi(.LCPI6_0) |
| ; CHECK-NEXT: flh ft0, %lo(.LCPI6_0)(a1) |
| ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu |
| ; CHECK-NEXT: vle16.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v9, ft0 |
| ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu |
| ; CHECK-NEXT: vfredusum.vs v8, v8, v9 |
| ; CHECK-NEXT: vfmv.f.s ft0, v8 |
| ; CHECK-NEXT: fadd.h fa0, fa0, ft0 |
| ; CHECK-NEXT: ret |
| %v = load <8 x half>, <8 x half>* %x |
| %red = call reassoc half @llvm.vector.reduce.fadd.v8f16(half %s, <8 x half> %v) |
| ret half %red |
| } |
| |
| define half @vreduce_ord_fadd_v8f16(<8 x half>* %x, half %s) { |
| ; CHECK-LABEL: vreduce_ord_fadd_v8f16: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu |
| ; CHECK-NEXT: vle16.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v9, fa0 |
| ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu |
| ; CHECK-NEXT: vfredosum.vs v8, v8, v9 |
| ; CHECK-NEXT: vfmv.f.s fa0, v8 |
| ; CHECK-NEXT: ret |
| %v = load <8 x half>, <8 x half>* %x |
| %red = call half @llvm.vector.reduce.fadd.v8f16(half %s, <8 x half> %v) |
| ret half %red |
| } |
| |
| declare half @llvm.vector.reduce.fadd.v16f16(half, <16 x half>) |
| |
| define half @vreduce_fadd_v16f16(<16 x half>* %x, half %s) { |
| ; CHECK-LABEL: vreduce_fadd_v16f16: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: lui a1, %hi(.LCPI8_0) |
| ; CHECK-NEXT: flh ft0, %lo(.LCPI8_0)(a1) |
| ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu |
| ; CHECK-NEXT: vle16.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v10, ft0 |
| ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu |
| ; CHECK-NEXT: vfredusum.vs v8, v8, v10 |
| ; CHECK-NEXT: vfmv.f.s ft0, v8 |
| ; CHECK-NEXT: fadd.h fa0, fa0, ft0 |
| ; CHECK-NEXT: ret |
| %v = load <16 x half>, <16 x half>* %x |
| %red = call reassoc half @llvm.vector.reduce.fadd.v16f16(half %s, <16 x half> %v) |
| ret half %red |
| } |
| |
| define half @vreduce_ord_fadd_v16f16(<16 x half>* %x, half %s) { |
| ; CHECK-LABEL: vreduce_ord_fadd_v16f16: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu |
| ; CHECK-NEXT: vle16.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v10, fa0 |
| ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu |
| ; CHECK-NEXT: vfredosum.vs v8, v8, v10 |
| ; CHECK-NEXT: vfmv.f.s fa0, v8 |
| ; CHECK-NEXT: ret |
| %v = load <16 x half>, <16 x half>* %x |
| %red = call half @llvm.vector.reduce.fadd.v16f16(half %s, <16 x half> %v) |
| ret half %red |
| } |
| |
| declare half @llvm.vector.reduce.fadd.v32f16(half, <32 x half>) |
| |
| define half @vreduce_fadd_v32f16(<32 x half>* %x, half %s) { |
| ; RV32-LABEL: vreduce_fadd_v32f16: |
| ; RV32: # %bb.0: |
| ; RV32-NEXT: li a1, 32 |
| ; RV32-NEXT: lui a2, %hi(.LCPI10_0) |
| ; RV32-NEXT: flh ft0, %lo(.LCPI10_0)(a2) |
| ; RV32-NEXT: vsetvli zero, a1, e16, m4, ta, mu |
| ; RV32-NEXT: vle16.v v8, (a0) |
| ; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, mu |
| ; RV32-NEXT: vfmv.v.f v12, ft0 |
| ; RV32-NEXT: vsetvli zero, a1, e16, m4, ta, mu |
| ; RV32-NEXT: vfredusum.vs v8, v8, v12 |
| ; RV32-NEXT: vfmv.f.s ft0, v8 |
| ; RV32-NEXT: fadd.h fa0, fa0, ft0 |
| ; RV32-NEXT: ret |
| ; |
| ; RV64-LABEL: vreduce_fadd_v32f16: |
| ; RV64: # %bb.0: |
| ; RV64-NEXT: lui a1, %hi(.LCPI10_0) |
| ; RV64-NEXT: flh ft0, %lo(.LCPI10_0)(a1) |
| ; RV64-NEXT: li a1, 32 |
| ; RV64-NEXT: vsetvli zero, a1, e16, m4, ta, mu |
| ; RV64-NEXT: vle16.v v8, (a0) |
| ; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, mu |
| ; RV64-NEXT: vfmv.v.f v12, ft0 |
| ; RV64-NEXT: vsetvli zero, a1, e16, m4, ta, mu |
| ; RV64-NEXT: vfredusum.vs v8, v8, v12 |
| ; RV64-NEXT: vfmv.f.s ft0, v8 |
| ; RV64-NEXT: fadd.h fa0, fa0, ft0 |
| ; RV64-NEXT: ret |
| %v = load <32 x half>, <32 x half>* %x |
| %red = call reassoc half @llvm.vector.reduce.fadd.v32f16(half %s, <32 x half> %v) |
| ret half %red |
| } |
| |
| define half @vreduce_ord_fadd_v32f16(<32 x half>* %x, half %s) { |
| ; CHECK-LABEL: vreduce_ord_fadd_v32f16: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: li a1, 32 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu |
| ; CHECK-NEXT: vle16.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v12, fa0 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu |
| ; CHECK-NEXT: vfredosum.vs v8, v8, v12 |
| ; CHECK-NEXT: vfmv.f.s fa0, v8 |
| ; CHECK-NEXT: ret |
| %v = load <32 x half>, <32 x half>* %x |
| %red = call half @llvm.vector.reduce.fadd.v32f16(half %s, <32 x half> %v) |
| ret half %red |
| } |
| |
| declare half @llvm.vector.reduce.fadd.v64f16(half, <64 x half>) |
| |
| define half @vreduce_fadd_v64f16(<64 x half>* %x, half %s) { |
| ; RV32-LABEL: vreduce_fadd_v64f16: |
| ; RV32: # %bb.0: |
| ; RV32-NEXT: li a1, 64 |
| ; RV32-NEXT: lui a2, %hi(.LCPI12_0) |
| ; RV32-NEXT: flh ft0, %lo(.LCPI12_0)(a2) |
| ; RV32-NEXT: vsetvli zero, a1, e16, m8, ta, mu |
| ; RV32-NEXT: vle16.v v8, (a0) |
| ; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, mu |
| ; RV32-NEXT: vfmv.v.f v16, ft0 |
| ; RV32-NEXT: vsetvli zero, a1, e16, m8, ta, mu |
| ; RV32-NEXT: vfredusum.vs v8, v8, v16 |
| ; RV32-NEXT: vfmv.f.s ft0, v8 |
| ; RV32-NEXT: fadd.h fa0, fa0, ft0 |
| ; RV32-NEXT: ret |
| ; |
| ; RV64-LABEL: vreduce_fadd_v64f16: |
| ; RV64: # %bb.0: |
| ; RV64-NEXT: lui a1, %hi(.LCPI12_0) |
| ; RV64-NEXT: flh ft0, %lo(.LCPI12_0)(a1) |
| ; RV64-NEXT: li a1, 64 |
| ; RV64-NEXT: vsetvli zero, a1, e16, m8, ta, mu |
| ; RV64-NEXT: vle16.v v8, (a0) |
| ; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, mu |
| ; RV64-NEXT: vfmv.v.f v16, ft0 |
| ; RV64-NEXT: vsetvli zero, a1, e16, m8, ta, mu |
| ; RV64-NEXT: vfredusum.vs v8, v8, v16 |
| ; RV64-NEXT: vfmv.f.s ft0, v8 |
| ; RV64-NEXT: fadd.h fa0, fa0, ft0 |
| ; RV64-NEXT: ret |
| %v = load <64 x half>, <64 x half>* %x |
| %red = call reassoc half @llvm.vector.reduce.fadd.v64f16(half %s, <64 x half> %v) |
| ret half %red |
| } |
| |
| define half @vreduce_ord_fadd_v64f16(<64 x half>* %x, half %s) { |
| ; CHECK-LABEL: vreduce_ord_fadd_v64f16: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: li a1, 64 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu |
| ; CHECK-NEXT: vle16.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v16, fa0 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu |
| ; CHECK-NEXT: vfredosum.vs v8, v8, v16 |
| ; CHECK-NEXT: vfmv.f.s fa0, v8 |
| ; CHECK-NEXT: ret |
| %v = load <64 x half>, <64 x half>* %x |
| %red = call half @llvm.vector.reduce.fadd.v64f16(half %s, <64 x half> %v) |
| ret half %red |
| } |
| |
| declare half @llvm.vector.reduce.fadd.v128f16(half, <128 x half>) |
| |
| define half @vreduce_fadd_v128f16(<128 x half>* %x, half %s) { |
| ; CHECK-LABEL: vreduce_fadd_v128f16: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: li a1, 64 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu |
| ; CHECK-NEXT: vle16.v v8, (a0) |
| ; CHECK-NEXT: addi a0, a0, 128 |
| ; CHECK-NEXT: vle16.v v16, (a0) |
| ; CHECK-NEXT: lui a0, %hi(.LCPI14_0) |
| ; CHECK-NEXT: flh ft0, %lo(.LCPI14_0)(a0) |
| ; CHECK-NEXT: vfadd.vv v8, v8, v16 |
| ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v16, ft0 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu |
| ; CHECK-NEXT: vfredusum.vs v8, v8, v16 |
| ; CHECK-NEXT: vfmv.f.s ft0, v8 |
| ; CHECK-NEXT: fadd.h fa0, fa0, ft0 |
| ; CHECK-NEXT: ret |
| %v = load <128 x half>, <128 x half>* %x |
| %red = call reassoc half @llvm.vector.reduce.fadd.v128f16(half %s, <128 x half> %v) |
| ret half %red |
| } |
| |
| define half @vreduce_ord_fadd_v128f16(<128 x half>* %x, half %s) { |
| ; CHECK-LABEL: vreduce_ord_fadd_v128f16: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: addi a1, a0, 128 |
| ; CHECK-NEXT: li a2, 64 |
| ; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, mu |
| ; CHECK-NEXT: vle16.v v8, (a1) |
| ; CHECK-NEXT: vle16.v v16, (a0) |
| ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v24, fa0 |
| ; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, mu |
| ; CHECK-NEXT: vfredosum.vs v16, v16, v24 |
| ; CHECK-NEXT: vfmv.f.s ft0, v16 |
| ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v16, ft0 |
| ; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, mu |
| ; CHECK-NEXT: vfredosum.vs v8, v8, v16 |
| ; CHECK-NEXT: vfmv.f.s fa0, v8 |
| ; CHECK-NEXT: ret |
| %v = load <128 x half>, <128 x half>* %x |
| %red = call half @llvm.vector.reduce.fadd.v128f16(half %s, <128 x half> %v) |
| ret half %red |
| } |
| |
| declare float @llvm.vector.reduce.fadd.v1f32(float, <1 x float>) |
| |
| define float @vreduce_fadd_v1f32(<1 x float>* %x, float %s) { |
| ; CHECK-LABEL: vreduce_fadd_v1f32: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vle32.v v8, (a0) |
| ; CHECK-NEXT: vfmv.f.s ft0, v8 |
| ; CHECK-NEXT: fadd.s fa0, fa0, ft0 |
| ; CHECK-NEXT: ret |
| %v = load <1 x float>, <1 x float>* %x |
| %red = call reassoc float @llvm.vector.reduce.fadd.v1f32(float %s, <1 x float> %v) |
| ret float %red |
| } |
| |
| define float @vreduce_ord_fadd_v1f32(<1 x float>* %x, float %s) { |
| ; CHECK-LABEL: vreduce_ord_fadd_v1f32: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vle32.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v9, fa0 |
| ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu |
| ; CHECK-NEXT: vfredosum.vs v8, v8, v9 |
| ; CHECK-NEXT: vfmv.f.s fa0, v8 |
| ; CHECK-NEXT: ret |
| %v = load <1 x float>, <1 x float>* %x |
| %red = call float @llvm.vector.reduce.fadd.v1f32(float %s, <1 x float> %v) |
| ret float %red |
| } |
| |
| declare float @llvm.vector.reduce.fadd.v2f32(float, <2 x float>) |
| |
| define float @vreduce_fadd_v2f32(<2 x float>* %x, float %s) { |
| ; CHECK-LABEL: vreduce_fadd_v2f32: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: lui a1, %hi(.LCPI18_0) |
| ; CHECK-NEXT: flw ft0, %lo(.LCPI18_0)(a1) |
| ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu |
| ; CHECK-NEXT: vle32.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v9, ft0 |
| ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu |
| ; CHECK-NEXT: vfredusum.vs v8, v8, v9 |
| ; CHECK-NEXT: vfmv.f.s ft0, v8 |
| ; CHECK-NEXT: fadd.s fa0, fa0, ft0 |
| ; CHECK-NEXT: ret |
| %v = load <2 x float>, <2 x float>* %x |
| %red = call reassoc float @llvm.vector.reduce.fadd.v2f32(float %s, <2 x float> %v) |
| ret float %red |
| } |
| |
| define float @vreduce_ord_fadd_v2f32(<2 x float>* %x, float %s) { |
| ; CHECK-LABEL: vreduce_ord_fadd_v2f32: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu |
| ; CHECK-NEXT: vle32.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v9, fa0 |
| ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu |
| ; CHECK-NEXT: vfredosum.vs v8, v8, v9 |
| ; CHECK-NEXT: vfmv.f.s fa0, v8 |
| ; CHECK-NEXT: ret |
| %v = load <2 x float>, <2 x float>* %x |
| %red = call float @llvm.vector.reduce.fadd.v2f32(float %s, <2 x float> %v) |
| ret float %red |
| } |
| |
| declare float @llvm.vector.reduce.fadd.v4f32(float, <4 x float>) |
| |
| define float @vreduce_fadd_v4f32(<4 x float>* %x, float %s) { |
| ; CHECK-LABEL: vreduce_fadd_v4f32: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: lui a1, %hi(.LCPI20_0) |
| ; CHECK-NEXT: flw ft0, %lo(.LCPI20_0)(a1) |
| ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu |
| ; CHECK-NEXT: vle32.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v9, ft0 |
| ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu |
| ; CHECK-NEXT: vfredusum.vs v8, v8, v9 |
| ; CHECK-NEXT: vfmv.f.s ft0, v8 |
| ; CHECK-NEXT: fadd.s fa0, fa0, ft0 |
| ; CHECK-NEXT: ret |
| %v = load <4 x float>, <4 x float>* %x |
| %red = call reassoc float @llvm.vector.reduce.fadd.v4f32(float %s, <4 x float> %v) |
| ret float %red |
| } |
| |
| define float @vreduce_ord_fadd_v4f32(<4 x float>* %x, float %s) { |
| ; CHECK-LABEL: vreduce_ord_fadd_v4f32: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu |
| ; CHECK-NEXT: vle32.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v9, fa0 |
| ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu |
| ; CHECK-NEXT: vfredosum.vs v8, v8, v9 |
| ; CHECK-NEXT: vfmv.f.s fa0, v8 |
| ; CHECK-NEXT: ret |
| %v = load <4 x float>, <4 x float>* %x |
| %red = call float @llvm.vector.reduce.fadd.v4f32(float %s, <4 x float> %v) |
| ret float %red |
| } |
| |
| declare float @llvm.vector.reduce.fadd.v8f32(float, <8 x float>) |
| |
| define float @vreduce_fadd_v8f32(<8 x float>* %x, float %s) { |
| ; CHECK-LABEL: vreduce_fadd_v8f32: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: lui a1, %hi(.LCPI22_0) |
| ; CHECK-NEXT: flw ft0, %lo(.LCPI22_0)(a1) |
| ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu |
| ; CHECK-NEXT: vle32.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v10, ft0 |
| ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu |
| ; CHECK-NEXT: vfredusum.vs v8, v8, v10 |
| ; CHECK-NEXT: vfmv.f.s ft0, v8 |
| ; CHECK-NEXT: fadd.s fa0, fa0, ft0 |
| ; CHECK-NEXT: ret |
| %v = load <8 x float>, <8 x float>* %x |
| %red = call reassoc float @llvm.vector.reduce.fadd.v8f32(float %s, <8 x float> %v) |
| ret float %red |
| } |
| |
| define float @vreduce_ord_fadd_v8f32(<8 x float>* %x, float %s) { |
| ; CHECK-LABEL: vreduce_ord_fadd_v8f32: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu |
| ; CHECK-NEXT: vle32.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v10, fa0 |
| ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu |
| ; CHECK-NEXT: vfredosum.vs v8, v8, v10 |
| ; CHECK-NEXT: vfmv.f.s fa0, v8 |
| ; CHECK-NEXT: ret |
| %v = load <8 x float>, <8 x float>* %x |
| %red = call float @llvm.vector.reduce.fadd.v8f32(float %s, <8 x float> %v) |
| ret float %red |
| } |
| |
| declare float @llvm.vector.reduce.fadd.v16f32(float, <16 x float>) |
| |
| define float @vreduce_fadd_v16f32(<16 x float>* %x, float %s) { |
| ; CHECK-LABEL: vreduce_fadd_v16f32: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: lui a1, %hi(.LCPI24_0) |
| ; CHECK-NEXT: flw ft0, %lo(.LCPI24_0)(a1) |
| ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu |
| ; CHECK-NEXT: vle32.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v12, ft0 |
| ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu |
| ; CHECK-NEXT: vfredusum.vs v8, v8, v12 |
| ; CHECK-NEXT: vfmv.f.s ft0, v8 |
| ; CHECK-NEXT: fadd.s fa0, fa0, ft0 |
| ; CHECK-NEXT: ret |
| %v = load <16 x float>, <16 x float>* %x |
| %red = call reassoc float @llvm.vector.reduce.fadd.v16f32(float %s, <16 x float> %v) |
| ret float %red |
| } |
| |
| define float @vreduce_ord_fadd_v16f32(<16 x float>* %x, float %s) { |
| ; CHECK-LABEL: vreduce_ord_fadd_v16f32: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu |
| ; CHECK-NEXT: vle32.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v12, fa0 |
| ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu |
| ; CHECK-NEXT: vfredosum.vs v8, v8, v12 |
| ; CHECK-NEXT: vfmv.f.s fa0, v8 |
| ; CHECK-NEXT: ret |
| %v = load <16 x float>, <16 x float>* %x |
| %red = call float @llvm.vector.reduce.fadd.v16f32(float %s, <16 x float> %v) |
| ret float %red |
| } |
| |
| declare float @llvm.vector.reduce.fadd.v32f32(float, <32 x float>) |
| |
| define float @vreduce_fadd_v32f32(<32 x float>* %x, float %s) { |
| ; RV32-LABEL: vreduce_fadd_v32f32: |
| ; RV32: # %bb.0: |
| ; RV32-NEXT: li a1, 32 |
| ; RV32-NEXT: lui a2, %hi(.LCPI26_0) |
| ; RV32-NEXT: flw ft0, %lo(.LCPI26_0)(a2) |
| ; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, mu |
| ; RV32-NEXT: vle32.v v8, (a0) |
| ; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu |
| ; RV32-NEXT: vfmv.v.f v16, ft0 |
| ; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, mu |
| ; RV32-NEXT: vfredusum.vs v8, v8, v16 |
| ; RV32-NEXT: vfmv.f.s ft0, v8 |
| ; RV32-NEXT: fadd.s fa0, fa0, ft0 |
| ; RV32-NEXT: ret |
| ; |
| ; RV64-LABEL: vreduce_fadd_v32f32: |
| ; RV64: # %bb.0: |
| ; RV64-NEXT: lui a1, %hi(.LCPI26_0) |
| ; RV64-NEXT: flw ft0, %lo(.LCPI26_0)(a1) |
| ; RV64-NEXT: li a1, 32 |
| ; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, mu |
| ; RV64-NEXT: vle32.v v8, (a0) |
| ; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu |
| ; RV64-NEXT: vfmv.v.f v16, ft0 |
| ; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, mu |
| ; RV64-NEXT: vfredusum.vs v8, v8, v16 |
| ; RV64-NEXT: vfmv.f.s ft0, v8 |
| ; RV64-NEXT: fadd.s fa0, fa0, ft0 |
| ; RV64-NEXT: ret |
| %v = load <32 x float>, <32 x float>* %x |
| %red = call reassoc float @llvm.vector.reduce.fadd.v32f32(float %s, <32 x float> %v) |
| ret float %red |
| } |
| |
| define float @vreduce_ord_fadd_v32f32(<32 x float>* %x, float %s) { |
| ; CHECK-LABEL: vreduce_ord_fadd_v32f32: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: li a1, 32 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu |
| ; CHECK-NEXT: vle32.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v16, fa0 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu |
| ; CHECK-NEXT: vfredosum.vs v8, v8, v16 |
| ; CHECK-NEXT: vfmv.f.s fa0, v8 |
| ; CHECK-NEXT: ret |
| %v = load <32 x float>, <32 x float>* %x |
| %red = call float @llvm.vector.reduce.fadd.v32f32(float %s, <32 x float> %v) |
| ret float %red |
| } |
| |
| declare float @llvm.vector.reduce.fadd.v64f32(float, <64 x float>) |
| |
| define float @vreduce_fadd_v64f32(<64 x float>* %x, float %s) { |
| ; CHECK-LABEL: vreduce_fadd_v64f32: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: li a1, 32 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu |
| ; CHECK-NEXT: vle32.v v8, (a0) |
| ; CHECK-NEXT: addi a0, a0, 128 |
| ; CHECK-NEXT: vle32.v v16, (a0) |
| ; CHECK-NEXT: lui a0, %hi(.LCPI28_0) |
| ; CHECK-NEXT: flw ft0, %lo(.LCPI28_0)(a0) |
| ; CHECK-NEXT: vfadd.vv v8, v8, v16 |
| ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v16, ft0 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu |
| ; CHECK-NEXT: vfredusum.vs v8, v8, v16 |
| ; CHECK-NEXT: vfmv.f.s ft0, v8 |
| ; CHECK-NEXT: fadd.s fa0, fa0, ft0 |
| ; CHECK-NEXT: ret |
| %v = load <64 x float>, <64 x float>* %x |
| %red = call reassoc float @llvm.vector.reduce.fadd.v64f32(float %s, <64 x float> %v) |
| ret float %red |
| } |
| |
| define float @vreduce_ord_fadd_v64f32(<64 x float>* %x, float %s) { |
| ; CHECK-LABEL: vreduce_ord_fadd_v64f32: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: addi a1, a0, 128 |
| ; CHECK-NEXT: li a2, 32 |
| ; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu |
| ; CHECK-NEXT: vle32.v v8, (a1) |
| ; CHECK-NEXT: vle32.v v16, (a0) |
| ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v24, fa0 |
| ; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu |
| ; CHECK-NEXT: vfredosum.vs v16, v16, v24 |
| ; CHECK-NEXT: vfmv.f.s ft0, v16 |
| ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v16, ft0 |
| ; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu |
| ; CHECK-NEXT: vfredosum.vs v8, v8, v16 |
| ; CHECK-NEXT: vfmv.f.s fa0, v8 |
| ; CHECK-NEXT: ret |
| %v = load <64 x float>, <64 x float>* %x |
| %red = call float @llvm.vector.reduce.fadd.v64f32(float %s, <64 x float> %v) |
| ret float %red |
| } |
| |
| declare double @llvm.vector.reduce.fadd.v1f64(double, <1 x double>) |
| |
| define double @vreduce_fadd_v1f64(<1 x double>* %x, double %s) { |
| ; CHECK-LABEL: vreduce_fadd_v1f64: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu |
| ; CHECK-NEXT: vle64.v v8, (a0) |
| ; CHECK-NEXT: vfmv.f.s ft0, v8 |
| ; CHECK-NEXT: fadd.d fa0, fa0, ft0 |
| ; CHECK-NEXT: ret |
| %v = load <1 x double>, <1 x double>* %x |
| %red = call reassoc double @llvm.vector.reduce.fadd.v1f64(double %s, <1 x double> %v) |
| ret double %red |
| } |
| |
| define double @vreduce_ord_fadd_v1f64(<1 x double>* %x, double %s) { |
| ; CHECK-LABEL: vreduce_ord_fadd_v1f64: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu |
| ; CHECK-NEXT: vle64.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v9, fa0 |
| ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu |
| ; CHECK-NEXT: vfredosum.vs v8, v8, v9 |
| ; CHECK-NEXT: vfmv.f.s fa0, v8 |
| ; CHECK-NEXT: ret |
| %v = load <1 x double>, <1 x double>* %x |
| %red = call double @llvm.vector.reduce.fadd.v1f64(double %s, <1 x double> %v) |
| ret double %red |
| } |
| |
| declare double @llvm.vector.reduce.fadd.v2f64(double, <2 x double>) |
| |
| define double @vreduce_fadd_v2f64(<2 x double>* %x, double %s) { |
| ; CHECK-LABEL: vreduce_fadd_v2f64: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: lui a1, %hi(.LCPI32_0) |
| ; CHECK-NEXT: fld ft0, %lo(.LCPI32_0)(a1) |
| ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; CHECK-NEXT: vle64.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v9, ft0 |
| ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; CHECK-NEXT: vfredusum.vs v8, v8, v9 |
| ; CHECK-NEXT: vfmv.f.s ft0, v8 |
| ; CHECK-NEXT: fadd.d fa0, fa0, ft0 |
| ; CHECK-NEXT: ret |
| %v = load <2 x double>, <2 x double>* %x |
| %red = call reassoc double @llvm.vector.reduce.fadd.v2f64(double %s, <2 x double> %v) |
| ret double %red |
| } |
| |
| define double @vreduce_ord_fadd_v2f64(<2 x double>* %x, double %s) { |
| ; CHECK-LABEL: vreduce_ord_fadd_v2f64: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; CHECK-NEXT: vle64.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v9, fa0 |
| ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; CHECK-NEXT: vfredosum.vs v8, v8, v9 |
| ; CHECK-NEXT: vfmv.f.s fa0, v8 |
| ; CHECK-NEXT: ret |
| %v = load <2 x double>, <2 x double>* %x |
| %red = call double @llvm.vector.reduce.fadd.v2f64(double %s, <2 x double> %v) |
| ret double %red |
| } |
| |
| declare double @llvm.vector.reduce.fadd.v4f64(double, <4 x double>) |
| |
| define double @vreduce_fadd_v4f64(<4 x double>* %x, double %s) { |
| ; CHECK-LABEL: vreduce_fadd_v4f64: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: lui a1, %hi(.LCPI34_0) |
| ; CHECK-NEXT: fld ft0, %lo(.LCPI34_0)(a1) |
| ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu |
| ; CHECK-NEXT: vle64.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v10, ft0 |
| ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu |
| ; CHECK-NEXT: vfredusum.vs v8, v8, v10 |
| ; CHECK-NEXT: vfmv.f.s ft0, v8 |
| ; CHECK-NEXT: fadd.d fa0, fa0, ft0 |
| ; CHECK-NEXT: ret |
| %v = load <4 x double>, <4 x double>* %x |
| %red = call reassoc double @llvm.vector.reduce.fadd.v4f64(double %s, <4 x double> %v) |
| ret double %red |
| } |
| |
| define double @vreduce_ord_fadd_v4f64(<4 x double>* %x, double %s) { |
| ; CHECK-LABEL: vreduce_ord_fadd_v4f64: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu |
| ; CHECK-NEXT: vle64.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v10, fa0 |
| ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu |
| ; CHECK-NEXT: vfredosum.vs v8, v8, v10 |
| ; CHECK-NEXT: vfmv.f.s fa0, v8 |
| ; CHECK-NEXT: ret |
| %v = load <4 x double>, <4 x double>* %x |
| %red = call double @llvm.vector.reduce.fadd.v4f64(double %s, <4 x double> %v) |
| ret double %red |
| } |
| |
| declare double @llvm.vector.reduce.fadd.v8f64(double, <8 x double>) |
| |
| define double @vreduce_fadd_v8f64(<8 x double>* %x, double %s) { |
| ; CHECK-LABEL: vreduce_fadd_v8f64: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: lui a1, %hi(.LCPI36_0) |
| ; CHECK-NEXT: fld ft0, %lo(.LCPI36_0)(a1) |
| ; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu |
| ; CHECK-NEXT: vle64.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v12, ft0 |
| ; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu |
| ; CHECK-NEXT: vfredusum.vs v8, v8, v12 |
| ; CHECK-NEXT: vfmv.f.s ft0, v8 |
| ; CHECK-NEXT: fadd.d fa0, fa0, ft0 |
| ; CHECK-NEXT: ret |
| %v = load <8 x double>, <8 x double>* %x |
| %red = call reassoc double @llvm.vector.reduce.fadd.v8f64(double %s, <8 x double> %v) |
| ret double %red |
| } |
| |
| define double @vreduce_ord_fadd_v8f64(<8 x double>* %x, double %s) { |
| ; CHECK-LABEL: vreduce_ord_fadd_v8f64: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu |
| ; CHECK-NEXT: vle64.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v12, fa0 |
| ; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu |
| ; CHECK-NEXT: vfredosum.vs v8, v8, v12 |
| ; CHECK-NEXT: vfmv.f.s fa0, v8 |
| ; CHECK-NEXT: ret |
| %v = load <8 x double>, <8 x double>* %x |
| %red = call double @llvm.vector.reduce.fadd.v8f64(double %s, <8 x double> %v) |
| ret double %red |
| } |
| |
| declare double @llvm.vector.reduce.fadd.v16f64(double, <16 x double>) |
| |
| define double @vreduce_fadd_v16f64(<16 x double>* %x, double %s) { |
| ; CHECK-LABEL: vreduce_fadd_v16f64: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: lui a1, %hi(.LCPI38_0) |
| ; CHECK-NEXT: fld ft0, %lo(.LCPI38_0)(a1) |
| ; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu |
| ; CHECK-NEXT: vle64.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v16, ft0 |
| ; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu |
| ; CHECK-NEXT: vfredusum.vs v8, v8, v16 |
| ; CHECK-NEXT: vfmv.f.s ft0, v8 |
| ; CHECK-NEXT: fadd.d fa0, fa0, ft0 |
| ; CHECK-NEXT: ret |
| %v = load <16 x double>, <16 x double>* %x |
| %red = call reassoc double @llvm.vector.reduce.fadd.v16f64(double %s, <16 x double> %v) |
| ret double %red |
| } |
| |
| define double @vreduce_ord_fadd_v16f64(<16 x double>* %x, double %s) { |
| ; CHECK-LABEL: vreduce_ord_fadd_v16f64: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu |
| ; CHECK-NEXT: vle64.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v16, fa0 |
| ; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu |
| ; CHECK-NEXT: vfredosum.vs v8, v8, v16 |
| ; CHECK-NEXT: vfmv.f.s fa0, v8 |
| ; CHECK-NEXT: ret |
| %v = load <16 x double>, <16 x double>* %x |
| %red = call double @llvm.vector.reduce.fadd.v16f64(double %s, <16 x double> %v) |
| ret double %red |
| } |
| |
| declare double @llvm.vector.reduce.fadd.v32f64(double, <32 x double>) |
| |
| define double @vreduce_fadd_v32f64(<32 x double>* %x, double %s) { |
| ; CHECK-LABEL: vreduce_fadd_v32f64: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu |
| ; CHECK-NEXT: vle64.v v8, (a0) |
| ; CHECK-NEXT: addi a0, a0, 128 |
| ; CHECK-NEXT: vle64.v v16, (a0) |
| ; CHECK-NEXT: lui a0, %hi(.LCPI40_0) |
| ; CHECK-NEXT: fld ft0, %lo(.LCPI40_0)(a0) |
| ; CHECK-NEXT: vfadd.vv v8, v8, v16 |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v16, ft0 |
| ; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu |
| ; CHECK-NEXT: vfredusum.vs v8, v8, v16 |
| ; CHECK-NEXT: vfmv.f.s ft0, v8 |
| ; CHECK-NEXT: fadd.d fa0, fa0, ft0 |
| ; CHECK-NEXT: ret |
| %v = load <32 x double>, <32 x double>* %x |
| %red = call reassoc double @llvm.vector.reduce.fadd.v32f64(double %s, <32 x double> %v) |
| ret double %red |
| } |
| |
| define double @vreduce_ord_fadd_v32f64(<32 x double>* %x, double %s) { |
| ; CHECK-LABEL: vreduce_ord_fadd_v32f64: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: addi a1, a0, 128 |
| ; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu |
| ; CHECK-NEXT: vle64.v v8, (a1) |
| ; CHECK-NEXT: vle64.v v16, (a0) |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v24, fa0 |
| ; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu |
| ; CHECK-NEXT: vfredosum.vs v16, v16, v24 |
| ; CHECK-NEXT: vfmv.f.s ft0, v16 |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v16, ft0 |
| ; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu |
| ; CHECK-NEXT: vfredosum.vs v8, v8, v16 |
| ; CHECK-NEXT: vfmv.f.s fa0, v8 |
| ; CHECK-NEXT: ret |
| %v = load <32 x double>, <32 x double>* %x |
| %red = call double @llvm.vector.reduce.fadd.v32f64(double %s, <32 x double> %v) |
| ret double %red |
| } |
| |
| declare half @llvm.vector.reduce.fmin.v2f16(<2 x half>) |
| |
| define half @vreduce_fmin_v2f16(<2 x half>* %x) { |
| ; CHECK-LABEL: vreduce_fmin_v2f16: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: lui a1, %hi(.LCPI42_0) |
| ; CHECK-NEXT: flh ft0, %lo(.LCPI42_0)(a1) |
| ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu |
| ; CHECK-NEXT: vle16.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v9, ft0 |
| ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu |
| ; CHECK-NEXT: vfredmin.vs v8, v8, v9 |
| ; CHECK-NEXT: vfmv.f.s fa0, v8 |
| ; CHECK-NEXT: ret |
| %v = load <2 x half>, <2 x half>* %x |
| %red = call half @llvm.vector.reduce.fmin.v2f16(<2 x half> %v) |
| ret half %red |
| } |
| |
| declare half @llvm.vector.reduce.fmin.v4f16(<4 x half>) |
| |
| define half @vreduce_fmin_v4f16(<4 x half>* %x) { |
| ; CHECK-LABEL: vreduce_fmin_v4f16: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: lui a1, %hi(.LCPI43_0) |
| ; CHECK-NEXT: flh ft0, %lo(.LCPI43_0)(a1) |
| ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu |
| ; CHECK-NEXT: vle16.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v9, ft0 |
| ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu |
| ; CHECK-NEXT: vfredmin.vs v8, v8, v9 |
| ; CHECK-NEXT: vfmv.f.s fa0, v8 |
| ; CHECK-NEXT: ret |
| %v = load <4 x half>, <4 x half>* %x |
| %red = call half @llvm.vector.reduce.fmin.v4f16(<4 x half> %v) |
| ret half %red |
| } |
| |
| define half @vreduce_fmin_v4f16_nonans(<4 x half>* %x) { |
| ; CHECK-LABEL: vreduce_fmin_v4f16_nonans: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: lui a1, %hi(.LCPI44_0) |
| ; CHECK-NEXT: flh ft0, %lo(.LCPI44_0)(a1) |
| ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu |
| ; CHECK-NEXT: vle16.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v9, ft0 |
| ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu |
| ; CHECK-NEXT: vfredmin.vs v8, v8, v9 |
| ; CHECK-NEXT: vfmv.f.s fa0, v8 |
| ; CHECK-NEXT: ret |
| %v = load <4 x half>, <4 x half>* %x |
| %red = call nnan half @llvm.vector.reduce.fmin.v4f16(<4 x half> %v) |
| ret half %red |
| } |
| |
| define half @vreduce_fmin_v4f16_nonans_noinfs(<4 x half>* %x) { |
| ; CHECK-LABEL: vreduce_fmin_v4f16_nonans_noinfs: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: lui a1, %hi(.LCPI45_0) |
| ; CHECK-NEXT: flh ft0, %lo(.LCPI45_0)(a1) |
| ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu |
| ; CHECK-NEXT: vle16.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v9, ft0 |
| ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu |
| ; CHECK-NEXT: vfredmin.vs v8, v8, v9 |
| ; CHECK-NEXT: vfmv.f.s fa0, v8 |
| ; CHECK-NEXT: ret |
| %v = load <4 x half>, <4 x half>* %x |
| %red = call nnan ninf half @llvm.vector.reduce.fmin.v4f16(<4 x half> %v) |
| ret half %red |
| } |
| |
| declare half @llvm.vector.reduce.fmin.v128f16(<128 x half>) |
| |
| define half @vreduce_fmin_v128f16(<128 x half>* %x) { |
| ; CHECK-LABEL: vreduce_fmin_v128f16: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: li a1, 64 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu |
| ; CHECK-NEXT: vle16.v v8, (a0) |
| ; CHECK-NEXT: addi a0, a0, 128 |
| ; CHECK-NEXT: vle16.v v16, (a0) |
| ; CHECK-NEXT: lui a0, %hi(.LCPI46_0) |
| ; CHECK-NEXT: flh ft0, %lo(.LCPI46_0)(a0) |
| ; CHECK-NEXT: vfmin.vv v8, v8, v16 |
| ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v16, ft0 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu |
| ; CHECK-NEXT: vfredmin.vs v8, v8, v16 |
| ; CHECK-NEXT: vfmv.f.s fa0, v8 |
| ; CHECK-NEXT: ret |
| %v = load <128 x half>, <128 x half>* %x |
| %red = call half @llvm.vector.reduce.fmin.v128f16(<128 x half> %v) |
| ret half %red |
| } |
| |
| declare float @llvm.vector.reduce.fmin.v2f32(<2 x float>) |
| |
| define float @vreduce_fmin_v2f32(<2 x float>* %x) { |
| ; CHECK-LABEL: vreduce_fmin_v2f32: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: lui a1, %hi(.LCPI47_0) |
| ; CHECK-NEXT: flw ft0, %lo(.LCPI47_0)(a1) |
| ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu |
| ; CHECK-NEXT: vle32.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v9, ft0 |
| ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu |
| ; CHECK-NEXT: vfredmin.vs v8, v8, v9 |
| ; CHECK-NEXT: vfmv.f.s fa0, v8 |
| ; CHECK-NEXT: ret |
| %v = load <2 x float>, <2 x float>* %x |
| %red = call float @llvm.vector.reduce.fmin.v2f32(<2 x float> %v) |
| ret float %red |
| } |
| |
| declare float @llvm.vector.reduce.fmin.v4f32(<4 x float>) |
| |
| define float @vreduce_fmin_v4f32(<4 x float>* %x) { |
| ; CHECK-LABEL: vreduce_fmin_v4f32: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: lui a1, %hi(.LCPI48_0) |
| ; CHECK-NEXT: flw ft0, %lo(.LCPI48_0)(a1) |
| ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu |
| ; CHECK-NEXT: vle32.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v9, ft0 |
| ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu |
| ; CHECK-NEXT: vfredmin.vs v8, v8, v9 |
| ; CHECK-NEXT: vfmv.f.s fa0, v8 |
| ; CHECK-NEXT: ret |
| %v = load <4 x float>, <4 x float>* %x |
| %red = call float @llvm.vector.reduce.fmin.v4f32(<4 x float> %v) |
| ret float %red |
| } |
| |
| define float @vreduce_fmin_v4f32_nonans(<4 x float>* %x) { |
| ; CHECK-LABEL: vreduce_fmin_v4f32_nonans: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: lui a1, %hi(.LCPI49_0) |
| ; CHECK-NEXT: flw ft0, %lo(.LCPI49_0)(a1) |
| ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu |
| ; CHECK-NEXT: vle32.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v9, ft0 |
| ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu |
| ; CHECK-NEXT: vfredmin.vs v8, v8, v9 |
| ; CHECK-NEXT: vfmv.f.s fa0, v8 |
| ; CHECK-NEXT: ret |
| %v = load <4 x float>, <4 x float>* %x |
| %red = call nnan float @llvm.vector.reduce.fmin.v4f32(<4 x float> %v) |
| ret float %red |
| } |
| |
| define float @vreduce_fmin_v4f32_nonans_noinfs(<4 x float>* %x) { |
| ; CHECK-LABEL: vreduce_fmin_v4f32_nonans_noinfs: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: lui a1, %hi(.LCPI50_0) |
| ; CHECK-NEXT: flw ft0, %lo(.LCPI50_0)(a1) |
| ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu |
| ; CHECK-NEXT: vle32.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v9, ft0 |
| ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu |
| ; CHECK-NEXT: vfredmin.vs v8, v8, v9 |
| ; CHECK-NEXT: vfmv.f.s fa0, v8 |
| ; CHECK-NEXT: ret |
| %v = load <4 x float>, <4 x float>* %x |
| %red = call nnan ninf float @llvm.vector.reduce.fmin.v4f32(<4 x float> %v) |
| ret float %red |
| } |
| |
| declare float @llvm.vector.reduce.fmin.v128f32(<128 x float>) |
| |
| define float @vreduce_fmin_v128f32(<128 x float>* %x) { |
| ; CHECK-LABEL: vreduce_fmin_v128f32: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: li a1, 32 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu |
| ; CHECK-NEXT: addi a2, a0, 384 |
| ; CHECK-NEXT: vle32.v v8, (a2) |
| ; CHECK-NEXT: addi a2, a0, 128 |
| ; CHECK-NEXT: vle32.v v16, (a2) |
| ; CHECK-NEXT: vle32.v v24, (a0) |
| ; CHECK-NEXT: addi a0, a0, 256 |
| ; CHECK-NEXT: vle32.v v0, (a0) |
| ; CHECK-NEXT: vfmin.vv v8, v16, v8 |
| ; CHECK-NEXT: lui a0, %hi(.LCPI51_0) |
| ; CHECK-NEXT: flw ft0, %lo(.LCPI51_0)(a0) |
| ; CHECK-NEXT: vfmin.vv v16, v24, v0 |
| ; CHECK-NEXT: vfmin.vv v8, v16, v8 |
| ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v16, ft0 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu |
| ; CHECK-NEXT: vfredmin.vs v8, v8, v16 |
| ; CHECK-NEXT: vfmv.f.s fa0, v8 |
| ; CHECK-NEXT: ret |
| %v = load <128 x float>, <128 x float>* %x |
| %red = call float @llvm.vector.reduce.fmin.v128f32(<128 x float> %v) |
| ret float %red |
| } |
| |
| declare double @llvm.vector.reduce.fmin.v2f64(<2 x double>) |
| |
| define double @vreduce_fmin_v2f64(<2 x double>* %x) { |
| ; CHECK-LABEL: vreduce_fmin_v2f64: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: lui a1, %hi(.LCPI52_0) |
| ; CHECK-NEXT: fld ft0, %lo(.LCPI52_0)(a1) |
| ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; CHECK-NEXT: vle64.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v9, ft0 |
| ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; CHECK-NEXT: vfredmin.vs v8, v8, v9 |
| ; CHECK-NEXT: vfmv.f.s fa0, v8 |
| ; CHECK-NEXT: ret |
| %v = load <2 x double>, <2 x double>* %x |
| %red = call double @llvm.vector.reduce.fmin.v2f64(<2 x double> %v) |
| ret double %red |
| } |
| |
| declare double @llvm.vector.reduce.fmin.v4f64(<4 x double>) |
| |
| define double @vreduce_fmin_v4f64(<4 x double>* %x) { |
| ; CHECK-LABEL: vreduce_fmin_v4f64: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: lui a1, %hi(.LCPI53_0) |
| ; CHECK-NEXT: fld ft0, %lo(.LCPI53_0)(a1) |
| ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu |
| ; CHECK-NEXT: vle64.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v10, ft0 |
| ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu |
| ; CHECK-NEXT: vfredmin.vs v8, v8, v10 |
| ; CHECK-NEXT: vfmv.f.s fa0, v8 |
| ; CHECK-NEXT: ret |
| %v = load <4 x double>, <4 x double>* %x |
| %red = call double @llvm.vector.reduce.fmin.v4f64(<4 x double> %v) |
| ret double %red |
| } |
| |
| define double @vreduce_fmin_v4f64_nonans(<4 x double>* %x) { |
| ; CHECK-LABEL: vreduce_fmin_v4f64_nonans: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: lui a1, %hi(.LCPI54_0) |
| ; CHECK-NEXT: fld ft0, %lo(.LCPI54_0)(a1) |
| ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu |
| ; CHECK-NEXT: vle64.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v10, ft0 |
| ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu |
| ; CHECK-NEXT: vfredmin.vs v8, v8, v10 |
| ; CHECK-NEXT: vfmv.f.s fa0, v8 |
| ; CHECK-NEXT: ret |
| %v = load <4 x double>, <4 x double>* %x |
| %red = call nnan double @llvm.vector.reduce.fmin.v4f64(<4 x double> %v) |
| ret double %red |
| } |
| |
| define double @vreduce_fmin_v4f64_nonans_noinfs(<4 x double>* %x) { |
| ; CHECK-LABEL: vreduce_fmin_v4f64_nonans_noinfs: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: lui a1, %hi(.LCPI55_0) |
| ; CHECK-NEXT: fld ft0, %lo(.LCPI55_0)(a1) |
| ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu |
| ; CHECK-NEXT: vle64.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v10, ft0 |
| ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu |
| ; CHECK-NEXT: vfredmin.vs v8, v8, v10 |
| ; CHECK-NEXT: vfmv.f.s fa0, v8 |
| ; CHECK-NEXT: ret |
| %v = load <4 x double>, <4 x double>* %x |
| %red = call nnan ninf double @llvm.vector.reduce.fmin.v4f64(<4 x double> %v) |
| ret double %red |
| } |
| |
| declare double @llvm.vector.reduce.fmin.v32f64(<32 x double>) |
| |
| define double @vreduce_fmin_v32f64(<32 x double>* %x) { |
| ; CHECK-LABEL: vreduce_fmin_v32f64: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu |
| ; CHECK-NEXT: vle64.v v8, (a0) |
| ; CHECK-NEXT: addi a0, a0, 128 |
| ; CHECK-NEXT: vle64.v v16, (a0) |
| ; CHECK-NEXT: lui a0, %hi(.LCPI56_0) |
| ; CHECK-NEXT: fld ft0, %lo(.LCPI56_0)(a0) |
| ; CHECK-NEXT: vfmin.vv v8, v8, v16 |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v16, ft0 |
| ; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu |
| ; CHECK-NEXT: vfredmin.vs v8, v8, v16 |
| ; CHECK-NEXT: vfmv.f.s fa0, v8 |
| ; CHECK-NEXT: ret |
| %v = load <32 x double>, <32 x double>* %x |
| %red = call double @llvm.vector.reduce.fmin.v32f64(<32 x double> %v) |
| ret double %red |
| } |
| |
| declare half @llvm.vector.reduce.fmax.v2f16(<2 x half>) |
| |
| define half @vreduce_fmax_v2f16(<2 x half>* %x) { |
| ; CHECK-LABEL: vreduce_fmax_v2f16: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: lui a1, %hi(.LCPI57_0) |
| ; CHECK-NEXT: flh ft0, %lo(.LCPI57_0)(a1) |
| ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu |
| ; CHECK-NEXT: vle16.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v9, ft0 |
| ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu |
| ; CHECK-NEXT: vfredmax.vs v8, v8, v9 |
| ; CHECK-NEXT: vfmv.f.s fa0, v8 |
| ; CHECK-NEXT: ret |
| %v = load <2 x half>, <2 x half>* %x |
| %red = call half @llvm.vector.reduce.fmax.v2f16(<2 x half> %v) |
| ret half %red |
| } |
| |
| declare half @llvm.vector.reduce.fmax.v4f16(<4 x half>) |
| |
| define half @vreduce_fmax_v4f16(<4 x half>* %x) { |
| ; CHECK-LABEL: vreduce_fmax_v4f16: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: lui a1, %hi(.LCPI58_0) |
| ; CHECK-NEXT: flh ft0, %lo(.LCPI58_0)(a1) |
| ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu |
| ; CHECK-NEXT: vle16.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v9, ft0 |
| ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu |
| ; CHECK-NEXT: vfredmax.vs v8, v8, v9 |
| ; CHECK-NEXT: vfmv.f.s fa0, v8 |
| ; CHECK-NEXT: ret |
| %v = load <4 x half>, <4 x half>* %x |
| %red = call half @llvm.vector.reduce.fmax.v4f16(<4 x half> %v) |
| ret half %red |
| } |
| |
| define half @vreduce_fmax_v4f16_nonans(<4 x half>* %x) { |
| ; CHECK-LABEL: vreduce_fmax_v4f16_nonans: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: lui a1, %hi(.LCPI59_0) |
| ; CHECK-NEXT: flh ft0, %lo(.LCPI59_0)(a1) |
| ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu |
| ; CHECK-NEXT: vle16.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v9, ft0 |
| ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu |
| ; CHECK-NEXT: vfredmax.vs v8, v8, v9 |
| ; CHECK-NEXT: vfmv.f.s fa0, v8 |
| ; CHECK-NEXT: ret |
| %v = load <4 x half>, <4 x half>* %x |
| %red = call nnan half @llvm.vector.reduce.fmax.v4f16(<4 x half> %v) |
| ret half %red |
| } |
| |
| define half @vreduce_fmax_v4f16_nonans_noinfs(<4 x half>* %x) { |
| ; CHECK-LABEL: vreduce_fmax_v4f16_nonans_noinfs: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: lui a1, %hi(.LCPI60_0) |
| ; CHECK-NEXT: flh ft0, %lo(.LCPI60_0)(a1) |
| ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu |
| ; CHECK-NEXT: vle16.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v9, ft0 |
| ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu |
| ; CHECK-NEXT: vfredmax.vs v8, v8, v9 |
| ; CHECK-NEXT: vfmv.f.s fa0, v8 |
| ; CHECK-NEXT: ret |
| %v = load <4 x half>, <4 x half>* %x |
| %red = call nnan ninf half @llvm.vector.reduce.fmax.v4f16(<4 x half> %v) |
| ret half %red |
| } |
| |
| declare half @llvm.vector.reduce.fmax.v128f16(<128 x half>) |
| |
| define half @vreduce_fmax_v128f16(<128 x half>* %x) { |
| ; CHECK-LABEL: vreduce_fmax_v128f16: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: li a1, 64 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu |
| ; CHECK-NEXT: vle16.v v8, (a0) |
| ; CHECK-NEXT: addi a0, a0, 128 |
| ; CHECK-NEXT: vle16.v v16, (a0) |
| ; CHECK-NEXT: lui a0, %hi(.LCPI61_0) |
| ; CHECK-NEXT: flh ft0, %lo(.LCPI61_0)(a0) |
| ; CHECK-NEXT: vfmax.vv v8, v8, v16 |
| ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v16, ft0 |
| ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu |
| ; CHECK-NEXT: vfredmax.vs v8, v8, v16 |
| ; CHECK-NEXT: vfmv.f.s fa0, v8 |
| ; CHECK-NEXT: ret |
| %v = load <128 x half>, <128 x half>* %x |
| %red = call half @llvm.vector.reduce.fmax.v128f16(<128 x half> %v) |
| ret half %red |
| } |
| |
| declare float @llvm.vector.reduce.fmax.v2f32(<2 x float>) |
| |
| define float @vreduce_fmax_v2f32(<2 x float>* %x) { |
| ; CHECK-LABEL: vreduce_fmax_v2f32: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: lui a1, %hi(.LCPI62_0) |
| ; CHECK-NEXT: flw ft0, %lo(.LCPI62_0)(a1) |
| ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu |
| ; CHECK-NEXT: vle32.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v9, ft0 |
| ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu |
| ; CHECK-NEXT: vfredmax.vs v8, v8, v9 |
| ; CHECK-NEXT: vfmv.f.s fa0, v8 |
| ; CHECK-NEXT: ret |
| %v = load <2 x float>, <2 x float>* %x |
| %red = call float @llvm.vector.reduce.fmax.v2f32(<2 x float> %v) |
| ret float %red |
| } |
| |
| declare float @llvm.vector.reduce.fmax.v4f32(<4 x float>) |
| |
| define float @vreduce_fmax_v4f32(<4 x float>* %x) { |
| ; CHECK-LABEL: vreduce_fmax_v4f32: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: lui a1, %hi(.LCPI63_0) |
| ; CHECK-NEXT: flw ft0, %lo(.LCPI63_0)(a1) |
| ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu |
| ; CHECK-NEXT: vle32.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v9, ft0 |
| ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu |
| ; CHECK-NEXT: vfredmax.vs v8, v8, v9 |
| ; CHECK-NEXT: vfmv.f.s fa0, v8 |
| ; CHECK-NEXT: ret |
| %v = load <4 x float>, <4 x float>* %x |
| %red = call float @llvm.vector.reduce.fmax.v4f32(<4 x float> %v) |
| ret float %red |
| } |
| |
| define float @vreduce_fmax_v4f32_nonans(<4 x float>* %x) { |
| ; CHECK-LABEL: vreduce_fmax_v4f32_nonans: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: lui a1, %hi(.LCPI64_0) |
| ; CHECK-NEXT: flw ft0, %lo(.LCPI64_0)(a1) |
| ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu |
| ; CHECK-NEXT: vle32.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v9, ft0 |
| ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu |
| ; CHECK-NEXT: vfredmax.vs v8, v8, v9 |
| ; CHECK-NEXT: vfmv.f.s fa0, v8 |
| ; CHECK-NEXT: ret |
| %v = load <4 x float>, <4 x float>* %x |
| %red = call nnan float @llvm.vector.reduce.fmax.v4f32(<4 x float> %v) |
| ret float %red |
| } |
| |
| define float @vreduce_fmax_v4f32_nonans_noinfs(<4 x float>* %x) { |
| ; CHECK-LABEL: vreduce_fmax_v4f32_nonans_noinfs: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: lui a1, %hi(.LCPI65_0) |
| ; CHECK-NEXT: flw ft0, %lo(.LCPI65_0)(a1) |
| ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu |
| ; CHECK-NEXT: vle32.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v9, ft0 |
| ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu |
| ; CHECK-NEXT: vfredmax.vs v8, v8, v9 |
| ; CHECK-NEXT: vfmv.f.s fa0, v8 |
| ; CHECK-NEXT: ret |
| %v = load <4 x float>, <4 x float>* %x |
| %red = call nnan ninf float @llvm.vector.reduce.fmax.v4f32(<4 x float> %v) |
| ret float %red |
| } |
| |
| declare float @llvm.vector.reduce.fmax.v128f32(<128 x float>) |
| |
| define float @vreduce_fmax_v128f32(<128 x float>* %x) { |
| ; CHECK-LABEL: vreduce_fmax_v128f32: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: li a1, 32 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu |
| ; CHECK-NEXT: addi a2, a0, 384 |
| ; CHECK-NEXT: vle32.v v8, (a2) |
| ; CHECK-NEXT: addi a2, a0, 128 |
| ; CHECK-NEXT: vle32.v v16, (a2) |
| ; CHECK-NEXT: vle32.v v24, (a0) |
| ; CHECK-NEXT: addi a0, a0, 256 |
| ; CHECK-NEXT: vle32.v v0, (a0) |
| ; CHECK-NEXT: vfmax.vv v8, v16, v8 |
| ; CHECK-NEXT: lui a0, %hi(.LCPI66_0) |
| ; CHECK-NEXT: flw ft0, %lo(.LCPI66_0)(a0) |
| ; CHECK-NEXT: vfmax.vv v16, v24, v0 |
| ; CHECK-NEXT: vfmax.vv v8, v16, v8 |
| ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v16, ft0 |
| ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu |
| ; CHECK-NEXT: vfredmax.vs v8, v8, v16 |
| ; CHECK-NEXT: vfmv.f.s fa0, v8 |
| ; CHECK-NEXT: ret |
| %v = load <128 x float>, <128 x float>* %x |
| %red = call float @llvm.vector.reduce.fmax.v128f32(<128 x float> %v) |
| ret float %red |
| } |
| |
| declare double @llvm.vector.reduce.fmax.v2f64(<2 x double>) |
| |
| define double @vreduce_fmax_v2f64(<2 x double>* %x) { |
| ; CHECK-LABEL: vreduce_fmax_v2f64: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: lui a1, %hi(.LCPI67_0) |
| ; CHECK-NEXT: fld ft0, %lo(.LCPI67_0)(a1) |
| ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; CHECK-NEXT: vle64.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v9, ft0 |
| ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu |
| ; CHECK-NEXT: vfredmax.vs v8, v8, v9 |
| ; CHECK-NEXT: vfmv.f.s fa0, v8 |
| ; CHECK-NEXT: ret |
| %v = load <2 x double>, <2 x double>* %x |
| %red = call double @llvm.vector.reduce.fmax.v2f64(<2 x double> %v) |
| ret double %red |
| } |
| |
| declare double @llvm.vector.reduce.fmax.v4f64(<4 x double>) |
| |
| define double @vreduce_fmax_v4f64(<4 x double>* %x) { |
| ; CHECK-LABEL: vreduce_fmax_v4f64: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: lui a1, %hi(.LCPI68_0) |
| ; CHECK-NEXT: fld ft0, %lo(.LCPI68_0)(a1) |
| ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu |
| ; CHECK-NEXT: vle64.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v10, ft0 |
| ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu |
| ; CHECK-NEXT: vfredmax.vs v8, v8, v10 |
| ; CHECK-NEXT: vfmv.f.s fa0, v8 |
| ; CHECK-NEXT: ret |
| %v = load <4 x double>, <4 x double>* %x |
| %red = call double @llvm.vector.reduce.fmax.v4f64(<4 x double> %v) |
| ret double %red |
| } |
| |
| define double @vreduce_fmax_v4f64_nonans(<4 x double>* %x) { |
| ; CHECK-LABEL: vreduce_fmax_v4f64_nonans: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: lui a1, %hi(.LCPI69_0) |
| ; CHECK-NEXT: fld ft0, %lo(.LCPI69_0)(a1) |
| ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu |
| ; CHECK-NEXT: vle64.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v10, ft0 |
| ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu |
| ; CHECK-NEXT: vfredmax.vs v8, v8, v10 |
| ; CHECK-NEXT: vfmv.f.s fa0, v8 |
| ; CHECK-NEXT: ret |
| %v = load <4 x double>, <4 x double>* %x |
| %red = call nnan double @llvm.vector.reduce.fmax.v4f64(<4 x double> %v) |
| ret double %red |
| } |
| |
| define double @vreduce_fmax_v4f64_nonans_noinfs(<4 x double>* %x) { |
| ; CHECK-LABEL: vreduce_fmax_v4f64_nonans_noinfs: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: lui a1, %hi(.LCPI70_0) |
| ; CHECK-NEXT: fld ft0, %lo(.LCPI70_0)(a1) |
| ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu |
| ; CHECK-NEXT: vle64.v v8, (a0) |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v10, ft0 |
| ; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu |
| ; CHECK-NEXT: vfredmax.vs v8, v8, v10 |
| ; CHECK-NEXT: vfmv.f.s fa0, v8 |
| ; CHECK-NEXT: ret |
| %v = load <4 x double>, <4 x double>* %x |
| %red = call nnan ninf double @llvm.vector.reduce.fmax.v4f64(<4 x double> %v) |
| ret double %red |
| } |
| |
| declare double @llvm.vector.reduce.fmax.v32f64(<32 x double>) |
| |
| define double @vreduce_fmax_v32f64(<32 x double>* %x) { |
| ; CHECK-LABEL: vreduce_fmax_v32f64: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu |
| ; CHECK-NEXT: vle64.v v8, (a0) |
| ; CHECK-NEXT: addi a0, a0, 128 |
| ; CHECK-NEXT: vle64.v v16, (a0) |
| ; CHECK-NEXT: lui a0, %hi(.LCPI71_0) |
| ; CHECK-NEXT: fld ft0, %lo(.LCPI71_0)(a0) |
| ; CHECK-NEXT: vfmax.vv v8, v8, v16 |
| ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu |
| ; CHECK-NEXT: vfmv.v.f v16, ft0 |
| ; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu |
| ; CHECK-NEXT: vfredmax.vs v8, v8, v16 |
| ; CHECK-NEXT: vfmv.f.s fa0, v8 |
| ; CHECK-NEXT: ret |
| %v = load <32 x double>, <32 x double>* %x |
| %red = call double @llvm.vector.reduce.fmax.v32f64(<32 x double> %v) |
| ret double %red |
| } |