[RISCV] Rename assembler mnemonics of unordered floating-point reductions for v1.0-rc change
Rename the vfredsum and vfwredsum mnemonics to vfredusum and vfwredusum to match the v1.0-rc specification, and keep vfredsum and vfwredsum as assembler aliases for the new names.
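
A minimal usage sketch of the renamed C intrinsic follows (the wrapper function reduce_f32m1 is illustrative only; the intrinsic names match the updated tests below). At the assembler level, vfredusum.vs and vfwredusum.vs become the canonical spellings, with the old mnemonics still accepted as aliases per the description above:

    #include <riscv_vector.h>

    // Unordered single-width FP reduction: the explicit intrinsic is now
    // vfredusum_vs_f32m1_f32m1 (overloaded form: vfredusum); it was
    // previously spelled vfredsum_vs_f32m1_f32m1 / vfredsum.
    vfloat32m1_t reduce_f32m1(vfloat32m1_t dst, vfloat32m1_t vector,
                              vfloat32m1_t scalar, size_t vl) {
      return vfredusum_vs_f32m1_f32m1(dst, vector, scalar, vl);
    }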
Reviewed By: luismarques, HsiangKai, khchen, frasercrmck, kito-cheng, craig.topper
Differential Revision: https://reviews.llvm.org/D105690
diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index 5ef5874..6de76aa 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -2006,11 +2006,11 @@
// 15.3. Vector Single-Width Floating-Point Reduction Instructions
defm vfredmax : RVVFloatingReductionBuiltin;
defm vfredmin : RVVFloatingReductionBuiltin;
-defm vfredsum : RVVFloatingReductionBuiltin;
+defm vfredusum : RVVFloatingReductionBuiltin;
defm vfredosum : RVVFloatingReductionBuiltin;
// 15.4. Vector Widening Floating-Point Reduction Instructions
-defm vfwredsum : RVVFloatingWidenReductionBuiltin;
+defm vfwredusum : RVVFloatingWidenReductionBuiltin;
defm vfwredosum : RVVFloatingWidenReductionBuiltin;
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredsum.c
index 4555884..cda20b2 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredsum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredsum.c
@@ -5,194 +5,212 @@
#include <riscv_vector.h>
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32mf2_f32m1(
+//
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
-vfloat32m1_t test_vfredsum_vs_f32mf2_f32m1(vfloat32m1_t dst,
- vfloat32mf2_t vector,
+vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1(vfloat32m1_t dst,
+ vfloat32mf2_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredusum(dst, vector, scalar, vl);
+}
+
+//
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m1_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredusum_vs_f32m1_f32m1(vfloat32m1_t dst, vfloat32m1_t vector,
vfloat32m1_t scalar, size_t vl) {
- return vfredsum(dst, vector, scalar, vl);
+ return vfredusum(dst, vector, scalar, vl);
}
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m1_f32m1(
+//
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m2_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
-vfloat32m1_t test_vfredsum_vs_f32m1_f32m1(vfloat32m1_t dst, vfloat32m1_t vector,
- vfloat32m1_t scalar, size_t vl) {
- return vfredsum(dst, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m2_f32m1(vfloat32m1_t dst, vfloat32m2_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredusum(dst, vector, scalar, vl);
}
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m2_f32m1(
+//
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m4_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
-vfloat32m1_t test_vfredsum_vs_f32m2_f32m1(vfloat32m1_t dst, vfloat32m2_t vector,
- vfloat32m1_t scalar, size_t vl) {
- return vfredsum(dst, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m4_f32m1(vfloat32m1_t dst, vfloat32m4_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredusum(dst, vector, scalar, vl);
}
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m4_f32m1(
+//
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m8_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
-vfloat32m1_t test_vfredsum_vs_f32m4_f32m1(vfloat32m1_t dst, vfloat32m4_t vector,
- vfloat32m1_t scalar, size_t vl) {
- return vfredsum(dst, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m8_f32m1(vfloat32m1_t dst, vfloat32m8_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredusum(dst, vector, scalar, vl);
}
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m8_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
-vfloat32m1_t test_vfredsum_vs_f32m8_f32m1(vfloat32m1_t dst, vfloat32m8_t vector,
- vfloat32m1_t scalar, size_t vl) {
- return vfredsum(dst, vector, scalar, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m1_f64m1(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m1_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
-vfloat64m1_t test_vfredsum_vs_f64m1_f64m1(vfloat64m1_t dst, vfloat64m1_t vector,
- vfloat64m1_t scalar, size_t vl) {
- return vfredsum(dst, vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m1_f64m1(vfloat64m1_t dst, vfloat64m1_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredusum(dst, vector, scalar, vl);
}
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m2_f64m1(
+//
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m2_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
-vfloat64m1_t test_vfredsum_vs_f64m2_f64m1(vfloat64m1_t dst, vfloat64m2_t vector,
- vfloat64m1_t scalar, size_t vl) {
- return vfredsum(dst, vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m2_f64m1(vfloat64m1_t dst, vfloat64m2_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredusum(dst, vector, scalar, vl);
}
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m4_f64m1(
+//
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m4_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
-vfloat64m1_t test_vfredsum_vs_f64m4_f64m1(vfloat64m1_t dst, vfloat64m4_t vector,
- vfloat64m1_t scalar, size_t vl) {
- return vfredsum(dst, vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m4_f64m1(vfloat64m1_t dst, vfloat64m4_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredusum(dst, vector, scalar, vl);
}
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m8_f64m1(
+//
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m8_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
-vfloat64m1_t test_vfredsum_vs_f64m8_f64m1(vfloat64m1_t dst, vfloat64m8_t vector,
- vfloat64m1_t scalar, size_t vl) {
- return vfredsum(dst, vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m8_f64m1(vfloat64m1_t dst, vfloat64m8_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredusum(dst, vector, scalar, vl);
}
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32mf2_f32m1_m(
+//
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
-vfloat32m1_t test_vfredsum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst,
- vfloat32mf2_t vector,
+vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst,
+ vfloat32mf2_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredusum(mask, dst, vector, scalar, vl);
+}
+
+//
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m1_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
+ vfloat32m1_t vector,
vfloat32m1_t scalar, size_t vl) {
- return vfredsum(mask, dst, vector, scalar, vl);
+ return vfredusum(mask, dst, vector, scalar, vl);
}
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m1_f32m1_m(
+//
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m2_f32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
-vfloat32m1_t test_vfredsum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
- vfloat32m1_t vector,
- vfloat32m1_t scalar, size_t vl) {
- return vfredsum(mask, dst, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst,
+ vfloat32m2_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredusum(mask, dst, vector, scalar, vl);
}
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m2_f32m1_m(
+//
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m4_f32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
-vfloat32m1_t test_vfredsum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst,
- vfloat32m2_t vector,
- vfloat32m1_t scalar, size_t vl) {
- return vfredsum(mask, dst, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst,
+ vfloat32m4_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredusum(mask, dst, vector, scalar, vl);
}
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m4_f32m1_m(
+//
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m8_f32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
-vfloat32m1_t test_vfredsum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst,
- vfloat32m4_t vector,
- vfloat32m1_t scalar, size_t vl) {
- return vfredsum(mask, dst, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst,
+ vfloat32m8_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredusum(mask, dst, vector, scalar, vl);
}
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m8_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
-vfloat32m1_t test_vfredsum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst,
- vfloat32m8_t vector,
- vfloat32m1_t scalar, size_t vl) {
- return vfredsum(mask, dst, vector, scalar, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m1_f64m1_m(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m1_f64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
-vfloat64m1_t test_vfredsum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
- vfloat64m1_t vector,
- vfloat64m1_t scalar, size_t vl) {
- return vfredsum(mask, dst, vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
+ vfloat64m1_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredusum(mask, dst, vector, scalar, vl);
}
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m2_f64m1_m(
+//
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m2_f64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
-vfloat64m1_t test_vfredsum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
- vfloat64m2_t vector,
- vfloat64m1_t scalar, size_t vl) {
- return vfredsum(mask, dst, vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
+ vfloat64m2_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredusum(mask, dst, vector, scalar, vl);
}
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m4_f64m1_m(
+//
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m4_f64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
-vfloat64m1_t test_vfredsum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
- vfloat64m4_t vector,
- vfloat64m1_t scalar, size_t vl) {
- return vfredsum(mask, dst, vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
+ vfloat64m4_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredusum(mask, dst, vector, scalar, vl);
}
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m8_f64m1_m(
+//
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m8_f64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
-vfloat64m1_t test_vfredsum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
- vfloat64m8_t vector,
- vfloat64m1_t scalar, size_t vl) {
- return vfredsum(mask, dst, vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
+ vfloat64m8_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredusum(mask, dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1(
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredsum.c
index 14049e1e..2ca8eb4 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredsum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredsum.c
@@ -5,114 +5,124 @@
#include <riscv_vector.h>
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32mf2_f64m1(
+//
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
-vfloat64m1_t test_vfwredsum_vs_f32mf2_f64m1(vfloat64m1_t dst,
- vfloat32mf2_t vector,
+vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1(vfloat64m1_t dst,
+ vfloat32mf2_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfwredusum(dst, vector, scalar, vl);
+}
+
+//
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m1_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1(vfloat64m1_t dst,
+ vfloat32m1_t vector,
vfloat64m1_t scalar, size_t vl) {
- return vfwredsum(dst, vector, scalar, vl);
+ return vfwredusum(dst, vector, scalar, vl);
}
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m1_f64m1(
+//
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m2_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
-vfloat64m1_t test_vfwredsum_vs_f32m1_f64m1(vfloat64m1_t dst,
- vfloat32m1_t vector,
- vfloat64m1_t scalar, size_t vl) {
- return vfwredsum(dst, vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1(vfloat64m1_t dst,
+ vfloat32m2_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfwredusum(dst, vector, scalar, vl);
}
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m2_f64m1(
+//
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m4_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
-vfloat64m1_t test_vfwredsum_vs_f32m2_f64m1(vfloat64m1_t dst,
- vfloat32m2_t vector,
- vfloat64m1_t scalar, size_t vl) {
- return vfwredsum(dst, vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1(vfloat64m1_t dst,
+ vfloat32m4_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfwredusum(dst, vector, scalar, vl);
}
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m4_f64m1(
+//
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m8_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
-vfloat64m1_t test_vfwredsum_vs_f32m4_f64m1(vfloat64m1_t dst,
- vfloat32m4_t vector,
- vfloat64m1_t scalar, size_t vl) {
- return vfwredsum(dst, vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1(vfloat64m1_t dst,
+ vfloat32m8_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfwredusum(dst, vector, scalar, vl);
}
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m8_f64m1(
+//
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
-vfloat64m1_t test_vfwredsum_vs_f32m8_f64m1(vfloat64m1_t dst,
- vfloat32m8_t vector,
- vfloat64m1_t scalar, size_t vl) {
- return vfwredsum(dst, vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
+ vfloat32mf2_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfwredusum(mask, dst, vector, scalar, vl);
}
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32mf2_f64m1_m(
+//
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m1_f64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
-vfloat64m1_t test_vfwredsum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
- vfloat32mf2_t vector,
+vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
+ vfloat32m1_t vector,
vfloat64m1_t scalar, size_t vl) {
- return vfwredsum(mask, dst, vector, scalar, vl);
+ return vfwredusum(mask, dst, vector, scalar, vl);
}
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m1_f64m1_m(
+//
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m2_f64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
-vfloat64m1_t test_vfwredsum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
- vfloat32m1_t vector,
- vfloat64m1_t scalar, size_t vl) {
- return vfwredsum(mask, dst, vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
+ vfloat32m2_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfwredusum(mask, dst, vector, scalar, vl);
}
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m2_f64m1_m(
+//
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m4_f64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
-vfloat64m1_t test_vfwredsum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
- vfloat32m2_t vector,
- vfloat64m1_t scalar, size_t vl) {
- return vfwredsum(mask, dst, vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
+ vfloat32m4_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfwredusum(mask, dst, vector, scalar, vl);
}
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m4_f64m1_m(
+//
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m8_f64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
-vfloat64m1_t test_vfwredsum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
- vfloat32m4_t vector,
- vfloat64m1_t scalar, size_t vl) {
- return vfwredsum(mask, dst, vector, scalar, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m8_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
-//
-vfloat64m1_t test_vfwredsum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t dst,
- vfloat32m8_t vector,
- vfloat64m1_t scalar, size_t vl) {
- return vfwredsum(mask, dst, vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t dst,
+ vfloat32m8_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfwredusum(mask, dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1(
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredsum.c
index 6c1ad4e..b38bbc2 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredsum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredsum.c
@@ -6,194 +6,194 @@
#include <riscv_vector.h>
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32mf2_f32m1(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
-vfloat32m1_t test_vfredsum_vs_f32mf2_f32m1(vfloat32m1_t dst,
- vfloat32mf2_t vector,
+vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1(vfloat32m1_t dst,
+ vfloat32mf2_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredusum_vs_f32mf2_f32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m1_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredusum_vs_f32m1_f32m1(vfloat32m1_t dst, vfloat32m1_t vector,
vfloat32m1_t scalar, size_t vl) {
- return vfredsum_vs_f32mf2_f32m1(dst, vector, scalar, vl);
+ return vfredusum_vs_f32m1_f32m1(dst, vector, scalar, vl);
}
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m1_f32m1(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m2_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
-vfloat32m1_t test_vfredsum_vs_f32m1_f32m1(vfloat32m1_t dst, vfloat32m1_t vector,
- vfloat32m1_t scalar, size_t vl) {
- return vfredsum_vs_f32m1_f32m1(dst, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m2_f32m1(vfloat32m1_t dst, vfloat32m2_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredusum_vs_f32m2_f32m1(dst, vector, scalar, vl);
}
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m2_f32m1(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m4_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
-vfloat32m1_t test_vfredsum_vs_f32m2_f32m1(vfloat32m1_t dst, vfloat32m2_t vector,
- vfloat32m1_t scalar, size_t vl) {
- return vfredsum_vs_f32m2_f32m1(dst, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m4_f32m1(vfloat32m1_t dst, vfloat32m4_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredusum_vs_f32m4_f32m1(dst, vector, scalar, vl);
}
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m4_f32m1(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m8_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
-vfloat32m1_t test_vfredsum_vs_f32m4_f32m1(vfloat32m1_t dst, vfloat32m4_t vector,
- vfloat32m1_t scalar, size_t vl) {
- return vfredsum_vs_f32m4_f32m1(dst, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m8_f32m1(vfloat32m1_t dst, vfloat32m8_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredusum_vs_f32m8_f32m1(dst, vector, scalar, vl);
}
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m8_f32m1(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m1_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
-//
-vfloat32m1_t test_vfredsum_vs_f32m8_f32m1(vfloat32m1_t dst, vfloat32m8_t vector,
- vfloat32m1_t scalar, size_t vl) {
- return vfredsum_vs_f32m8_f32m1(dst, vector, scalar, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m1_f64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
-vfloat64m1_t test_vfredsum_vs_f64m1_f64m1(vfloat64m1_t dst, vfloat64m1_t vector,
- vfloat64m1_t scalar, size_t vl) {
- return vfredsum_vs_f64m1_f64m1(dst, vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m1_f64m1(vfloat64m1_t dst, vfloat64m1_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredusum_vs_f64m1_f64m1(dst, vector, scalar, vl);
}
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m2_f64m1(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m2_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
-vfloat64m1_t test_vfredsum_vs_f64m2_f64m1(vfloat64m1_t dst, vfloat64m2_t vector,
- vfloat64m1_t scalar, size_t vl) {
- return vfredsum_vs_f64m2_f64m1(dst, vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m2_f64m1(vfloat64m1_t dst, vfloat64m2_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredusum_vs_f64m2_f64m1(dst, vector, scalar, vl);
}
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m4_f64m1(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m4_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
-vfloat64m1_t test_vfredsum_vs_f64m4_f64m1(vfloat64m1_t dst, vfloat64m4_t vector,
- vfloat64m1_t scalar, size_t vl) {
- return vfredsum_vs_f64m4_f64m1(dst, vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m4_f64m1(vfloat64m1_t dst, vfloat64m4_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredusum_vs_f64m4_f64m1(dst, vector, scalar, vl);
}
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m8_f64m1(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m8_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
-vfloat64m1_t test_vfredsum_vs_f64m8_f64m1(vfloat64m1_t dst, vfloat64m8_t vector,
- vfloat64m1_t scalar, size_t vl) {
- return vfredsum_vs_f64m8_f64m1(dst, vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m8_f64m1(vfloat64m1_t dst, vfloat64m8_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredusum_vs_f64m8_f64m1(dst, vector, scalar, vl);
}
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32mf2_f32m1_m(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
-vfloat32m1_t test_vfredsum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst,
- vfloat32mf2_t vector,
+vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst,
+ vfloat32mf2_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredusum_vs_f32mf2_f32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m1_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
+ vfloat32m1_t vector,
vfloat32m1_t scalar, size_t vl) {
- return vfredsum_vs_f32mf2_f32m1_m(mask, dst, vector, scalar, vl);
+ return vfredusum_vs_f32m1_f32m1_m(mask, dst, vector, scalar, vl);
}
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m1_f32m1_m(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m2_f32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
-vfloat32m1_t test_vfredsum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
- vfloat32m1_t vector,
- vfloat32m1_t scalar, size_t vl) {
- return vfredsum_vs_f32m1_f32m1_m(mask, dst, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst,
+ vfloat32m2_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredusum_vs_f32m2_f32m1_m(mask, dst, vector, scalar, vl);
}
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m2_f32m1_m(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m4_f32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
-vfloat32m1_t test_vfredsum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst,
- vfloat32m2_t vector,
- vfloat32m1_t scalar, size_t vl) {
- return vfredsum_vs_f32m2_f32m1_m(mask, dst, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst,
+ vfloat32m4_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredusum_vs_f32m4_f32m1_m(mask, dst, vector, scalar, vl);
}
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m4_f32m1_m(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m8_f32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
-vfloat32m1_t test_vfredsum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst,
- vfloat32m4_t vector,
- vfloat32m1_t scalar, size_t vl) {
- return vfredsum_vs_f32m4_f32m1_m(mask, dst, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst,
+ vfloat32m8_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredusum_vs_f32m8_f32m1_m(mask, dst, vector, scalar, vl);
}
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m8_f32m1_m(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m1_f64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
-//
-vfloat32m1_t test_vfredsum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst,
- vfloat32m8_t vector,
- vfloat32m1_t scalar, size_t vl) {
- return vfredsum_vs_f32m8_f32m1_m(mask, dst, vector, scalar, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m1_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
-vfloat64m1_t test_vfredsum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
- vfloat64m1_t vector,
- vfloat64m1_t scalar, size_t vl) {
- return vfredsum_vs_f64m1_f64m1_m(mask, dst, vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
+ vfloat64m1_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredusum_vs_f64m1_f64m1_m(mask, dst, vector, scalar, vl);
}
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m2_f64m1_m(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m2_f64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
-vfloat64m1_t test_vfredsum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
- vfloat64m2_t vector,
- vfloat64m1_t scalar, size_t vl) {
- return vfredsum_vs_f64m2_f64m1_m(mask, dst, vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
+ vfloat64m2_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredusum_vs_f64m2_f64m1_m(mask, dst, vector, scalar, vl);
}
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m4_f64m1_m(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m4_f64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
-vfloat64m1_t test_vfredsum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
- vfloat64m4_t vector,
- vfloat64m1_t scalar, size_t vl) {
- return vfredsum_vs_f64m4_f64m1_m(mask, dst, vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
+ vfloat64m4_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredusum_vs_f64m4_f64m1_m(mask, dst, vector, scalar, vl);
}
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m8_f64m1_m(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m8_f64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
-vfloat64m1_t test_vfredsum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
- vfloat64m8_t vector,
- vfloat64m1_t scalar, size_t vl) {
- return vfredsum_vs_f64m8_f64m1_m(mask, dst, vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
+ vfloat64m8_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredusum_vs_f64m8_f64m1_m(mask, dst, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1(
@@ -394,112 +394,112 @@
return vfredosum_vs_f64m8_f64m1_m(mask, dst, vector, scalar, vl);
}
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f16mf4_f16m1(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf4_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv1f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv1f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
-vfloat16m1_t test_vfredsum_vs_f16mf4_f16m1 (vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredsum_vs_f16mf4_f16m1(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1(vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
+ return vfredusum_vs_f16mf4_f16m1(dest, vector, scalar, vl);
}
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f16mf2_f16m1(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf2_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv2f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv2f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
-vfloat16m1_t test_vfredsum_vs_f16mf2_f16m1 (vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredsum_vs_f16mf2_f16m1(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1(vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
+ return vfredusum_vs_f16mf2_f16m1(dest, vector, scalar, vl);
}
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f16m1_f16m1(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m1_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
-vfloat16m1_t test_vfredsum_vs_f16m1_f16m1 (vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredsum_vs_f16m1_f16m1(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16m1_f16m1(vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
+ return vfredusum_vs_f16m1_f16m1(dest, vector, scalar, vl);
}
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f16m2_f16m1(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m2_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
-vfloat16m1_t test_vfredsum_vs_f16m2_f16m1 (vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredsum_vs_f16m2_f16m1(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16m2_f16m1(vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
+ return vfredusum_vs_f16m2_f16m1(dest, vector, scalar, vl);
}
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f16m4_f16m1(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m4_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv16f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv16f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
-vfloat16m1_t test_vfredsum_vs_f16m4_f16m1 (vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredsum_vs_f16m4_f16m1(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16m4_f16m1(vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
+ return vfredusum_vs_f16m4_f16m1(dest, vector, scalar, vl);
}
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f16m8_f16m1(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m8_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv32f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv32f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
-vfloat16m1_t test_vfredsum_vs_f16m8_f16m1 (vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredsum_vs_f16m8_f16m1(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16m8_f16m1(vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
+ return vfredusum_vs_f16m8_f16m1(dest, vector, scalar, vl);
}
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f16mf4_f16m1_m(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf4_f16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv1f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv1f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
-vfloat16m1_t test_vfredsum_vs_f16mf4_f16m1_m (vbool64_t mask, vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredsum_vs_f16mf4_f16m1_m(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
+ return vfredusum_vs_f16mf4_f16m1_m(mask, dest, vector, scalar, vl);
}
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f16mf2_f16m1_m(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf2_f16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv2f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv2f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
-vfloat16m1_t test_vfredsum_vs_f16mf2_f16m1_m (vbool32_t mask, vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredsum_vs_f16mf2_f16m1_m(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
+ return vfredusum_vs_f16mf2_f16m1_m(mask, dest, vector, scalar, vl);
}
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f16m1_f16m1_m(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m1_f16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
-vfloat16m1_t test_vfredsum_vs_f16m1_f16m1_m (vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredsum_vs_f16m1_f16m1_m(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
+ return vfredusum_vs_f16m1_f16m1_m(mask, dest, vector, scalar, vl);
}
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f16m2_f16m1_m(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m2_f16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
-vfloat16m1_t test_vfredsum_vs_f16m2_f16m1_m (vbool8_t mask, vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredsum_vs_f16m2_f16m1_m(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
+ return vfredusum_vs_f16m2_f16m1_m(mask, dest, vector, scalar, vl);
}
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f16m4_f16m1_m(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m4_f16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv16f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv16f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
-vfloat16m1_t test_vfredsum_vs_f16m4_f16m1_m (vbool4_t mask, vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredsum_vs_f16m4_f16m1_m(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
+ return vfredusum_vs_f16m4_f16m1_m(mask, dest, vector, scalar, vl);
}
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f16m8_f16m1_m(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m8_f16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv32f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv32f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
-vfloat16m1_t test_vfredsum_vs_f16m8_f16m1_m (vbool2_t mask, vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
- return vfredsum_vs_f16m8_f16m1_m(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
+ return vfredusum_vs_f16m8_f16m1_m(mask, dest, vector, scalar, vl);
}
// CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf4_f16m1(
@@ -507,7 +507,7 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv1f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
-vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1 (vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
+vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1(vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
return vfredosum_vs_f16mf4_f16m1(dest, vector, scalar, vl);
}
@@ -516,7 +516,7 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv2f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
-vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1 (vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
+vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1(vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
return vfredosum_vs_f16mf2_f16m1(dest, vector, scalar, vl);
}
@@ -525,7 +525,7 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
-vfloat16m1_t test_vfredosum_vs_f16m1_f16m1 (vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
+vfloat16m1_t test_vfredosum_vs_f16m1_f16m1(vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
return vfredosum_vs_f16m1_f16m1(dest, vector, scalar, vl);
}
@@ -534,7 +534,7 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
-vfloat16m1_t test_vfredosum_vs_f16m2_f16m1 (vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
+vfloat16m1_t test_vfredosum_vs_f16m2_f16m1(vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
return vfredosum_vs_f16m2_f16m1(dest, vector, scalar, vl);
}
@@ -543,7 +543,7 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv16f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
-vfloat16m1_t test_vfredosum_vs_f16m4_f16m1 (vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
+vfloat16m1_t test_vfredosum_vs_f16m4_f16m1(vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
return vfredosum_vs_f16m4_f16m1(dest, vector, scalar, vl);
}
@@ -552,7 +552,7 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv32f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
-vfloat16m1_t test_vfredosum_vs_f16m8_f16m1 (vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
+vfloat16m1_t test_vfredosum_vs_f16m8_f16m1(vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
return vfredosum_vs_f16m8_f16m1(dest, vector, scalar, vl);
}
@@ -561,7 +561,7 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
-vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_m (vbool64_t mask, vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
+vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
return vfredosum_vs_f16mf4_f16m1_m(mask, dest, vector, scalar, vl);
}
@@ -570,7 +570,7 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
-vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_m (vbool32_t mask, vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
+vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
return vfredosum_vs_f16mf2_f16m1_m(mask, dest, vector, scalar, vl);
}
@@ -579,7 +579,7 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
-vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_m (vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
+vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
return vfredosum_vs_f16m1_f16m1_m(mask, dest, vector, scalar, vl);
}
@@ -588,7 +588,7 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
-vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_m (vbool8_t mask, vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
+vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
return vfredosum_vs_f16m2_f16m1_m(mask, dest, vector, scalar, vl);
}
@@ -597,7 +597,7 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
-vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_m (vbool4_t mask, vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
+vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
return vfredosum_vs_f16m4_f16m1_m(mask, dest, vector, scalar, vl);
}
@@ -606,6 +606,6 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
-vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_m (vbool2_t mask, vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
+vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
return vfredosum_vs_f16m8_f16m1_m(mask, dest, vector, scalar, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredsum.c
deleted file mode 100644
index aefde64..0000000
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredsum.c
+++ /dev/null
@@ -1,225 +0,0 @@
-// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
-// REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
-// RUN: -target-feature +experimental-v -target-feature +experimental-zfh \
-// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
-
-#include <riscv_vector.h>
-
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32mf2_f64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
-//
-vfloat64m1_t test_vfwredsum_vs_f32mf2_f64m1(vfloat64m1_t dst,
- vfloat32mf2_t vector,
- vfloat64m1_t scalar, size_t vl) {
- return vfwredsum_vs_f32mf2_f64m1(dst, vector, scalar, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m1_f64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
-//
-vfloat64m1_t test_vfwredsum_vs_f32m1_f64m1(vfloat64m1_t dst,
- vfloat32m1_t vector,
- vfloat64m1_t scalar, size_t vl) {
- return vfwredsum_vs_f32m1_f64m1(dst, vector, scalar, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m2_f64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
-//
-vfloat64m1_t test_vfwredsum_vs_f32m2_f64m1(vfloat64m1_t dst,
- vfloat32m2_t vector,
- vfloat64m1_t scalar, size_t vl) {
- return vfwredsum_vs_f32m2_f64m1(dst, vector, scalar, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m4_f64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
-//
-vfloat64m1_t test_vfwredsum_vs_f32m4_f64m1(vfloat64m1_t dst,
- vfloat32m4_t vector,
- vfloat64m1_t scalar, size_t vl) {
- return vfwredsum_vs_f32m4_f64m1(dst, vector, scalar, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m8_f64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
-//
-vfloat64m1_t test_vfwredsum_vs_f32m8_f64m1(vfloat64m1_t dst,
- vfloat32m8_t vector,
- vfloat64m1_t scalar, size_t vl) {
- return vfwredsum_vs_f32m8_f64m1(dst, vector, scalar, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32mf2_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
-//
-vfloat64m1_t test_vfwredsum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
- vfloat32mf2_t vector,
- vfloat64m1_t scalar, size_t vl) {
- return vfwredsum_vs_f32mf2_f64m1_m(mask, dst, vector, scalar, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m1_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
-//
-vfloat64m1_t test_vfwredsum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
- vfloat32m1_t vector,
- vfloat64m1_t scalar, size_t vl) {
- return vfwredsum_vs_f32m1_f64m1_m(mask, dst, vector, scalar, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m2_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
-//
-vfloat64m1_t test_vfwredsum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
- vfloat32m2_t vector,
- vfloat64m1_t scalar, size_t vl) {
- return vfwredsum_vs_f32m2_f64m1_m(mask, dst, vector, scalar, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m4_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
-//
-vfloat64m1_t test_vfwredsum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
- vfloat32m4_t vector,
- vfloat64m1_t scalar, size_t vl) {
- return vfwredsum_vs_f32m4_f64m1_m(mask, dst, vector, scalar, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m8_f64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
-//
-vfloat64m1_t test_vfwredsum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t dst,
- vfloat32m8_t vector,
- vfloat64m1_t scalar, size_t vl) {
- return vfwredsum_vs_f32m8_f64m1_m(mask, dst, vector, scalar, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f16mf4_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv1f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
-//
-vfloat32m1_t test_vfwredsum_vs_f16mf4_f32m1 (vfloat32m1_t dest, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredsum_vs_f16mf4_f32m1(dest, vector, scalar, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f16mf2_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv2f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
-//
-vfloat32m1_t test_vfwredsum_vs_f16mf2_f32m1 (vfloat32m1_t dest, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredsum_vs_f16mf2_f32m1(dest, vector, scalar, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f16m1_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv4f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
-//
-vfloat32m1_t test_vfwredsum_vs_f16m1_f32m1 (vfloat32m1_t dest, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredsum_vs_f16m1_f32m1(dest, vector, scalar, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f16m2_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv8f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
-//
-vfloat32m1_t test_vfwredsum_vs_f16m2_f32m1 (vfloat32m1_t dest, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredsum_vs_f16m2_f32m1(dest, vector, scalar, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f16m4_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv16f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
-//
-vfloat32m1_t test_vfwredsum_vs_f16m4_f32m1 (vfloat32m1_t dest, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredsum_vs_f16m4_f32m1(dest, vector, scalar, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f16m8_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv32f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
-//
-vfloat32m1_t test_vfwredsum_vs_f16m8_f32m1 (vfloat32m1_t dest, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredsum_vs_f16m8_f32m1(dest, vector, scalar, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f16mf4_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv1f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
-//
-vfloat32m1_t test_vfwredsum_vs_f16mf4_f32m1_m (vbool64_t mask, vfloat32m1_t dest, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredsum_vs_f16mf4_f32m1_m(mask, dest, vector, scalar, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f16mf2_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv2f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
-//
-vfloat32m1_t test_vfwredsum_vs_f16mf2_f32m1_m (vbool32_t mask, vfloat32m1_t dest, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredsum_vs_f16mf2_f32m1_m(mask, dest, vector, scalar, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f16m1_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv4f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
-//
-vfloat32m1_t test_vfwredsum_vs_f16m1_f32m1_m (vbool16_t mask, vfloat32m1_t dest, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredsum_vs_f16m1_f32m1_m(mask, dest, vector, scalar, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f16m2_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv8f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
-//
-vfloat32m1_t test_vfwredsum_vs_f16m2_f32m1_m (vbool8_t mask, vfloat32m1_t dest, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredsum_vs_f16m2_f32m1_m(mask, dest, vector, scalar, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f16m4_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv16f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
-//
-vfloat32m1_t test_vfwredsum_vs_f16m4_f32m1_m (vbool4_t mask, vfloat32m1_t dest, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredsum_vs_f16m4_f32m1_m(mask, dest, vector, scalar, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f16m8_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv32f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
-//
-vfloat32m1_t test_vfwredsum_vs_f16m8_f32m1_m (vbool2_t mask, vfloat32m1_t dest, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) {
- return vfwredsum_vs_f16m8_f32m1_m(mask, dest, vector, scalar, vl);
-}