| // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py |
| // REQUIRES: riscv-registered-target |
| // RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \ |
| // RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s |
| // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ |
| // RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s |
| |
| #include <riscv_vector.h> |
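
// This file exercises the RVV narrowing floating-point conversion
// intrinsics (vfncvt). In the intrinsic names the first letter group is
// the destination element class and the second the source: vfncvt_x_f_w
// converts a double-width float to a signed integer, _xu_ to an unsigned
// integer, vfncvt_f_x_w / vfncvt_f_xu_w convert double-width integers to
// float, and vfncvt_f_f_w narrows float to float. The rtz variants round
// statically toward zero and rod rounds toward odd; the remaining forms
// round according to the dynamic frm CSR. Coverage spans LMUL mf4-m4 for
// 16-bit results and mf2-m4 for 32-bit results. The RV32 and RV64 check
// prefixes differ only in the XLEN-sized vl operand (i32 vs. i64) of the
// generated llvm.riscv.vfncvt.* calls.
//
// For illustration only (kept inside a comment so the autogenerated
// checks stay valid), a sketch of a strip-mined loop built on one of the
// intrinsics tested below; the helper name and pointer parameters are
// hypothetical:
//
//   static void narrow_f32_to_i16(int16_t *dst, const float *src, size_t n) {
//     while (n > 0) {
//       size_t vl = vsetvl_e32mf2(n);               // elements this pass
//       vfloat32mf2_t v = vle32_v_f32mf2(src, vl);  // load wide source
//       vint16mf4_t r = vfncvt_x_f_w_i16mf4(v, vl); // narrow f32 -> i16
//       vse16_v_i16mf4(dst, r, vl);                 // store narrow result
//       src += vl; dst += vl; n -= vl;
//     }
//   }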
| |
| // CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i16mf4( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32.i32(<vscale x 1 x float> [[SRC:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]] |
| // |
| vint16mf4_t test_vfncvt_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) { |
| return vfncvt_x_f_w_i16mf4(src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32.i32(<vscale x 1 x float> [[SRC:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]] |
| // |
| vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) { |
| return vfncvt_rtz_x_f_w_i16mf4(src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i16mf2( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32.i32(<vscale x 2 x float> [[SRC:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf2( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]] |
| // |
| vint16mf2_t test_vfncvt_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) { |
| return vfncvt_x_f_w_i16mf2(src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32.i32(<vscale x 2 x float> [[SRC:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]] |
| // |
| vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) { |
| return vfncvt_rtz_x_f_w_i16mf2(src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i16m1( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32.i32(<vscale x 4 x float> [[SRC:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m1( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]] |
| // |
| vint16m1_t test_vfncvt_x_f_w_i16m1(vfloat32m2_t src, size_t vl) { |
| return vfncvt_x_f_w_i16m1(src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i16m1( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32.i32(<vscale x 4 x float> [[SRC:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]] |
| // |
| vint16m1_t test_vfncvt_rtz_x_f_w_i16m1(vfloat32m2_t src, size_t vl) { |
| return vfncvt_rtz_x_f_w_i16m1(src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i16m2( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32.i32(<vscale x 8 x float> [[SRC:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m2( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]] |
| // |
| vint16m2_t test_vfncvt_x_f_w_i16m2(vfloat32m4_t src, size_t vl) { |
| return vfncvt_x_f_w_i16m2(src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i16m2( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32.i32(<vscale x 8 x float> [[SRC:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]] |
| // |
| vint16m2_t test_vfncvt_rtz_x_f_w_i16m2(vfloat32m4_t src, size_t vl) { |
| return vfncvt_rtz_x_f_w_i16m2(src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i16m4( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32.i32(<vscale x 16 x float> [[SRC:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m4( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]] |
| // |
| vint16m4_t test_vfncvt_x_f_w_i16m4(vfloat32m8_t src, size_t vl) { |
| return vfncvt_x_f_w_i16m4(src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i16m4( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32.i32(<vscale x 16 x float> [[SRC:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]] |
| // |
| vint16m4_t test_vfncvt_rtz_x_f_w_i16m4(vfloat32m8_t src, size_t vl) { |
| return vfncvt_rtz_x_f_w_i16m4(src, vl); |
| } |
| |
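// Conversions to unsigned integers (vfncvt_xu_f_w), identical in shape to
// the signed tests above, including the rtz (round-toward-zero) variants.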
| // CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u16mf4( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32.i32(<vscale x 1 x float> [[SRC:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]] |
| // |
| vuint16mf4_t test_vfncvt_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) { |
| return vfncvt_xu_f_w_u16mf4(src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32.i32(<vscale x 1 x float> [[SRC:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]] |
| // |
| vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) { |
| return vfncvt_rtz_xu_f_w_u16mf4(src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u16mf2( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32.i32(<vscale x 2 x float> [[SRC:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf2( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]] |
| // |
| vuint16mf2_t test_vfncvt_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) { |
| return vfncvt_xu_f_w_u16mf2(src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32.i32(<vscale x 2 x float> [[SRC:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]] |
| // |
| vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) { |
| return vfncvt_rtz_xu_f_w_u16mf2(src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u16m1( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32.i32(<vscale x 4 x float> [[SRC:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m1( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]] |
| // |
| vuint16m1_t test_vfncvt_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) { |
| return vfncvt_xu_f_w_u16m1(src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32.i32(<vscale x 4 x float> [[SRC:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]] |
| // |
| vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) { |
| return vfncvt_rtz_xu_f_w_u16m1(src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u16m2( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32.i32(<vscale x 8 x float> [[SRC:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m2( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]] |
| // |
| vuint16m2_t test_vfncvt_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) { |
| return vfncvt_xu_f_w_u16m2(src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32.i32(<vscale x 8 x float> [[SRC:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]] |
| // |
| vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) { |
| return vfncvt_rtz_xu_f_w_u16m2(src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u16m4( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32.i32(<vscale x 16 x float> [[SRC:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m4( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]] |
| // |
| vuint16m4_t test_vfncvt_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) { |
| return vfncvt_xu_f_w_u16m4(src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32.i32(<vscale x 16 x float> [[SRC:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]] |
| // |
| vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) { |
| return vfncvt_rtz_xu_f_w_u16m4(src, vl); |
| } |
| |
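// The same signed/unsigned conversions with 64-bit float sources narrowing
// to 32-bit integer results (f64 -> i32), covering LMUL mf2 through m4.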
| // CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i32mf2( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64.i32(<vscale x 1 x double> [[SRC:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32mf2( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]] |
| // |
| vint32mf2_t test_vfncvt_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) { |
| return vfncvt_x_f_w_i32mf2(src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64.i32(<vscale x 1 x double> [[SRC:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]] |
| // |
| vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) { |
| return vfncvt_rtz_x_f_w_i32mf2(src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i32m1( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64.i32(<vscale x 2 x double> [[SRC:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m1( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]] |
| // |
| vint32m1_t test_vfncvt_x_f_w_i32m1(vfloat64m2_t src, size_t vl) { |
| return vfncvt_x_f_w_i32m1(src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i32m1( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64.i32(<vscale x 2 x double> [[SRC:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]] |
| // |
| vint32m1_t test_vfncvt_rtz_x_f_w_i32m1(vfloat64m2_t src, size_t vl) { |
| return vfncvt_rtz_x_f_w_i32m1(src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i32m2( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64.i32(<vscale x 4 x double> [[SRC:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m2( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]] |
| // |
| vint32m2_t test_vfncvt_x_f_w_i32m2(vfloat64m4_t src, size_t vl) { |
| return vfncvt_x_f_w_i32m2(src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i32m2( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64.i32(<vscale x 4 x double> [[SRC:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]] |
| // |
| vint32m2_t test_vfncvt_rtz_x_f_w_i32m2(vfloat64m4_t src, size_t vl) { |
| return vfncvt_rtz_x_f_w_i32m2(src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i32m4( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64.i32(<vscale x 8 x double> [[SRC:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m4( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]] |
| // |
| vint32m4_t test_vfncvt_x_f_w_i32m4(vfloat64m8_t src, size_t vl) { |
| return vfncvt_x_f_w_i32m4(src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i32m4( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64.i32(<vscale x 8 x double> [[SRC:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]] |
| // |
| vint32m4_t test_vfncvt_rtz_x_f_w_i32m4(vfloat64m8_t src, size_t vl) { |
| return vfncvt_rtz_x_f_w_i32m4(src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u32mf2( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64.i32(<vscale x 1 x double> [[SRC:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32mf2( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]] |
| // |
| vuint32mf2_t test_vfncvt_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) { |
| return vfncvt_xu_f_w_u32mf2(src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64.i32(<vscale x 1 x double> [[SRC:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]] |
| // |
| vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) { |
| return vfncvt_rtz_xu_f_w_u32mf2(src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u32m1( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64.i32(<vscale x 2 x double> [[SRC:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m1( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]] |
| // |
| vuint32m1_t test_vfncvt_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) { |
| return vfncvt_xu_f_w_u32m1(src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64.i32(<vscale x 2 x double> [[SRC:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]] |
| // |
| vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) { |
| return vfncvt_rtz_xu_f_w_u32m1(src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u32m2( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64.i32(<vscale x 4 x double> [[SRC:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m2( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]] |
| // |
| vuint32m2_t test_vfncvt_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) { |
| return vfncvt_xu_f_w_u32m2(src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64.i32(<vscale x 4 x double> [[SRC:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]] |
| // |
| vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) { |
| return vfncvt_rtz_xu_f_w_u32m2(src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u32m4( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64.i32(<vscale x 8 x double> [[SRC:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m4( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]] |
| // |
| vuint32m4_t test_vfncvt_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) { |
| return vfncvt_xu_f_w_u32m4(src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64.i32(<vscale x 8 x double> [[SRC:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]] |
| // |
| vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) { |
| return vfncvt_rtz_xu_f_w_u32m4(src, vl); |
| } |
| |
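// Integer-to-float narrowing: vfncvt_f_x_w converts double-width signed
// integers (i64) to single-width float (f32), rounding per the frm CSR.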
| // CHECK-RV32-LABEL: @test_vfncvt_f_x_w_f32mf2( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64.i32(<vscale x 1 x i64> [[SRC:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32mf2( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]] |
| // |
| vfloat32mf2_t test_vfncvt_f_x_w_f32mf2(vint64m1_t src, size_t vl) { |
| return vfncvt_f_x_w_f32mf2(src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_f_x_w_f32m1( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64.i32(<vscale x 2 x i64> [[SRC:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m1( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]] |
| // |
| vfloat32m1_t test_vfncvt_f_x_w_f32m1(vint64m2_t src, size_t vl) { |
| return vfncvt_f_x_w_f32m1(src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_f_x_w_f32m2( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64.i32(<vscale x 4 x i64> [[SRC:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m2( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]] |
| // |
| vfloat32m2_t test_vfncvt_f_x_w_f32m2(vint64m4_t src, size_t vl) { |
| return vfncvt_f_x_w_f32m2(src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_f_x_w_f32m4( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64.i32(<vscale x 8 x i64> [[SRC:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m4( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]] |
| // |
| vfloat32m4_t test_vfncvt_f_x_w_f32m4(vint64m8_t src, size_t vl) { |
| return vfncvt_f_x_w_f32m4(src, vl); |
| } |
| |
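// The unsigned counterpart, vfncvt_f_xu_w (u64 -> f32).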
| // CHECK-RV32-LABEL: @test_vfncvt_f_xu_w_f32mf2( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64.i32(<vscale x 1 x i64> [[SRC:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32mf2( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]] |
| // |
| vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2(vuint64m1_t src, size_t vl) { |
| return vfncvt_f_xu_w_f32mf2(src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_f_xu_w_f32m1( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64.i32(<vscale x 2 x i64> [[SRC:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m1( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]] |
| // |
| vfloat32m1_t test_vfncvt_f_xu_w_f32m1(vuint64m2_t src, size_t vl) { |
| return vfncvt_f_xu_w_f32m1(src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_f_xu_w_f32m2( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64.i32(<vscale x 4 x i64> [[SRC:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m2( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]] |
| // |
| vfloat32m2_t test_vfncvt_f_xu_w_f32m2(vuint64m4_t src, size_t vl) { |
| return vfncvt_f_xu_w_f32m2(src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_f_xu_w_f32m4( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64.i32(<vscale x 8 x i64> [[SRC:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m4( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]] |
| // |
| vfloat32m4_t test_vfncvt_f_xu_w_f32m4(vuint64m8_t src, size_t vl) { |
| return vfncvt_f_xu_w_f32m4(src, vl); |
| } |
| |
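// Float-to-float narrowing (f64 -> f32). vfncvt_f_f_w rounds according to
// the dynamic frm CSR; vfncvt_rod_f_f_w rounds toward odd, which avoids
// double-rounding error when a value is narrowed in two steps.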
| // CHECK-RV32-LABEL: @test_vfncvt_f_f_w_f32mf2( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.f.w.nxv1f32.nxv1f64.i32(<vscale x 1 x double> [[SRC:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32mf2( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.f.w.nxv1f32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]] |
| // |
| vfloat32mf2_t test_vfncvt_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) { |
| return vfncvt_f_f_w_f32mf2(src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_rod_f_f_w_f32mf2( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64.i32(<vscale x 1 x double> [[SRC:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]] |
| // |
| vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) { |
| return vfncvt_rod_f_f_w_f32mf2(src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_f_f_w_f32m1( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64.i32(<vscale x 2 x double> [[SRC:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m1( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]] |
| // |
| vfloat32m1_t test_vfncvt_f_f_w_f32m1(vfloat64m2_t src, size_t vl) { |
| return vfncvt_f_f_w_f32m1(src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_rod_f_f_w_f32m1( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64.i32(<vscale x 2 x double> [[SRC:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]] |
| // |
| vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1(vfloat64m2_t src, size_t vl) { |
| return vfncvt_rod_f_f_w_f32m1(src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_f_f_w_f32m2( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64.i32(<vscale x 4 x double> [[SRC:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m2( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]] |
| // |
| vfloat32m2_t test_vfncvt_f_f_w_f32m2(vfloat64m4_t src, size_t vl) { |
| return vfncvt_f_f_w_f32m2(src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_rod_f_f_w_f32m2( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64.i32(<vscale x 4 x double> [[SRC:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]] |
| // |
| vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2(vfloat64m4_t src, size_t vl) { |
| return vfncvt_rod_f_f_w_f32m2(src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_f_f_w_f32m4( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64.i32(<vscale x 8 x double> [[SRC:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m4( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]] |
| // |
| vfloat32m4_t test_vfncvt_f_f_w_f32m4(vfloat64m8_t src, size_t vl) { |
| return vfncvt_f_f_w_f32m4(src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_rod_f_f_w_f32m4( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64.i32(<vscale x 8 x double> [[SRC:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]] |
| // |
| vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4(vfloat64m8_t src, size_t vl) { |
| return vfncvt_rod_f_f_w_f32m4(src, vl); |
| } |
| |
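// Masked variants. Each _m intrinsic takes a mask (vbool*_t) and a
// maskedoff vector supplying the result for inactive elements, and lowers
// to the corresponding llvm.riscv.vfncvt.*.mask intrinsic carrying those
// two extra operands.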
| // CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i16mf4_m( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4_m( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]] |
| // |
| vint16mf4_t test_vfncvt_x_f_w_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, |
| vfloat32mf2_t src, size_t vl) { |
| return vfncvt_x_f_w_i16mf4_m(mask, maskedoff, src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_m( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_m( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]] |
| // |
| vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_m(vbool64_t mask, |
| vint16mf4_t maskedoff, |
| vfloat32mf2_t src, size_t vl) { |
| return vfncvt_rtz_x_f_w_i16mf4_m(mask, maskedoff, src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i16mf2_m( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i16.nxv2f32.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf2_m( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i16.nxv2f32.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]] |
| // |
| vint16mf2_t test_vfncvt_x_f_w_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, |
| vfloat32m1_t src, size_t vl) { |
| return vfncvt_x_f_w_i16mf2_m(mask, maskedoff, src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2_m( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2_m( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]] |
| // |
| vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_m(vbool32_t mask, |
| vint16mf2_t maskedoff, |
| vfloat32m1_t src, size_t vl) { |
| return vfncvt_rtz_x_f_w_i16mf2_m(mask, maskedoff, src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i16m1_m( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i16.nxv4f32.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m1_m( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i16.nxv4f32.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]] |
| // |
| vint16m1_t test_vfncvt_x_f_w_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, |
| vfloat32m2_t src, size_t vl) { |
| return vfncvt_x_f_w_i16m1_m(mask, maskedoff, src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i16m1_m( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1_m( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]] |
| // |
| vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, |
| vfloat32m2_t src, size_t vl) { |
| return vfncvt_rtz_x_f_w_i16m1_m(mask, maskedoff, src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i16m2_m( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i16.nxv8f32.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m2_m( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i16.nxv8f32.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]] |
| // |
| vint16m2_t test_vfncvt_x_f_w_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, |
| vfloat32m4_t src, size_t vl) { |
| return vfncvt_x_f_w_i16m2_m(mask, maskedoff, src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i16m2_m( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2_m( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]] |
| // |
| vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, |
| vfloat32m4_t src, size_t vl) { |
| return vfncvt_rtz_x_f_w_i16m2_m(mask, maskedoff, src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i16m4_m( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i16.nxv16f32.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m4_m( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i16.nxv16f32.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]] |
| // |
| vint16m4_t test_vfncvt_x_f_w_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, |
| vfloat32m8_t src, size_t vl) { |
| return vfncvt_x_f_w_i16m4_m(mask, maskedoff, src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i16m4_m( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4_m( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]] |
| // |
| vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, |
| vfloat32m8_t src, size_t vl) { |
| return vfncvt_rtz_x_f_w_i16m4_m(mask, maskedoff, src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u16mf4_m( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4_m( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]] |
| // |
| vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, |
| vfloat32mf2_t src, size_t vl) { |
| return vfncvt_xu_f_w_u16mf4_m(mask, maskedoff, src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_m( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_m( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]] |
| // |
| vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_m(vbool64_t mask, |
| vuint16mf4_t maskedoff, |
| vfloat32mf2_t src, size_t vl) { |
| return vfncvt_rtz_xu_f_w_u16mf4_m(mask, maskedoff, src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u16mf2_m( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf2_m( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]] |
| // |
| vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, |
| vfloat32m1_t src, size_t vl) { |
| return vfncvt_xu_f_w_u16mf2_m(mask, maskedoff, src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2_m( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2_m( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]] |
| // |
| vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_m(vbool32_t mask, |
| vuint16mf2_t maskedoff, |
| vfloat32m1_t src, size_t vl) { |
| return vfncvt_rtz_xu_f_w_u16mf2_m(mask, maskedoff, src, vl); |
| } |
| |
| // CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u16m1_m( |
| // CHECK-RV32-NEXT: entry: |
| // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) |
| // CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]] |
| // |
| // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m1_m( |
| // CHECK-RV64-NEXT: entry: |
| // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) |
| // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]] |
| // |
| vuint16m1_t test_vfncvt_xu_f_w_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, |
| vfloat32m2_t src, size_t vl) { |
| return vfncvt_xu_f_w_u16m1_m(mask, maskedoff, src, vl); |
| } |
| |
// CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_m(vbool16_t mask,
                                           vuint16m1_t maskedoff,
                                           vfloat32m2_t src, size_t vl) {
  return vfncvt_rtz_xu_f_w_u16m1_m(mask, maskedoff, src, vl);
}

// CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u16m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vfncvt_xu_f_w_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
                                       vfloat32m4_t src, size_t vl) {
  return vfncvt_xu_f_w_u16m2_m(mask, maskedoff, src, vl);
}

// CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
                                           vfloat32m4_t src, size_t vl) {
  return vfncvt_rtz_xu_f_w_u16m2_m(mask, maskedoff, src, vl);
}

// CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u16m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vfncvt_xu_f_w_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
                                       vfloat32m8_t src, size_t vl) {
  return vfncvt_xu_f_w_u16m4_m(mask, maskedoff, src, vl);
}

// CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
                                           vfloat32m8_t src, size_t vl) {
  return vfncvt_rtz_xu_f_w_u16m4_m(mask, maskedoff, src, vl);
}

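// The remaining integer tests narrow from 64-bit doubles to 32-bit integers
// (f64 -> i32/u32), relying on the +d target feature from the RUN lines.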
// CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i32.nxv1f64.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i32.nxv1f64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vfncvt_x_f_w_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
                                       vfloat64m1_t src, size_t vl) {
  return vfncvt_x_f_w_i32mf2_m(mask, maskedoff, src, vl);
}

// CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_m(vbool64_t mask,
                                           vint32mf2_t maskedoff,
                                           vfloat64m1_t src, size_t vl) {
  return vfncvt_rtz_x_f_w_i32mf2_m(mask, maskedoff, src, vl);
}

// CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i32.nxv2f64.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i32.nxv2f64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vfncvt_x_f_w_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
                                     vfloat64m2_t src, size_t vl) {
  return vfncvt_x_f_w_i32m1_m(mask, maskedoff, src, vl);
}

// CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
                                         vfloat64m2_t src, size_t vl) {
  return vfncvt_rtz_x_f_w_i32m1_m(mask, maskedoff, src, vl);
}

// CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i32.nxv4f64.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i32.nxv4f64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vfncvt_x_f_w_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
                                     vfloat64m4_t src, size_t vl) {
  return vfncvt_x_f_w_i32m2_m(mask, maskedoff, src, vl);
}

// CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
                                         vfloat64m4_t src, size_t vl) {
  return vfncvt_rtz_x_f_w_i32m2_m(mask, maskedoff, src, vl);
}

// CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i32.nxv8f64.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i32.nxv8f64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vfncvt_x_f_w_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
                                     vfloat64m8_t src, size_t vl) {
  return vfncvt_x_f_w_i32m4_m(mask, maskedoff, src, vl);
}

// CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
                                         vfloat64m8_t src, size_t vl) {
  return vfncvt_rtz_x_f_w_i32m4_m(mask, maskedoff, src, vl);
}

// CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
                                         vfloat64m1_t src, size_t vl) {
  return vfncvt_xu_f_w_u32mf2_m(mask, maskedoff, src, vl);
}

// CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_m(vbool64_t mask,
                                             vuint32mf2_t maskedoff,
                                             vfloat64m1_t src, size_t vl) {
  return vfncvt_rtz_xu_f_w_u32mf2_m(mask, maskedoff, src, vl);
}

// CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfncvt_xu_f_w_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
                                       vfloat64m2_t src, size_t vl) {
  return vfncvt_xu_f_w_u32m1_m(mask, maskedoff, src, vl);
}

// CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_m(vbool32_t mask,
                                           vuint32m1_t maskedoff,
                                           vfloat64m2_t src, size_t vl) {
  return vfncvt_rtz_xu_f_w_u32m1_m(mask, maskedoff, src, vl);
}

// CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfncvt_xu_f_w_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
                                       vfloat64m4_t src, size_t vl) {
  return vfncvt_xu_f_w_u32m2_m(mask, maskedoff, src, vl);
}

// CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_m(vbool16_t mask,
                                           vuint32m2_t maskedoff,
                                           vfloat64m4_t src, size_t vl) {
  return vfncvt_rtz_xu_f_w_u32m2_m(mask, maskedoff, src, vl);
}

// CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfncvt_xu_f_w_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
                                       vfloat64m8_t src, size_t vl) {
  return vfncvt_xu_f_w_u32m4_m(mask, maskedoff, src, vl);
}

// CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
                                           vfloat64m8_t src, size_t vl) {
  return vfncvt_rtz_xu_f_w_u32m4_m(mask, maskedoff, src, vl);
}

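// vfncvt_f_x_w converts wide signed integers (i64) to narrower floats (f32),
// rounding according to the dynamic rounding mode.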
// CHECK-RV32-LABEL: @test_vfncvt_f_x_w_f32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv1f32.nxv1i64.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_m(vbool64_t mask,
                                         vfloat32mf2_t maskedoff,
                                         vint64m1_t src, size_t vl) {
  return vfncvt_f_x_w_f32mf2_m(mask, maskedoff, src, vl);
}

// CHECK-RV32-LABEL: @test_vfncvt_f_x_w_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv2f32.nxv2i64.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv2f32.nxv2i64.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfncvt_f_x_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
                                       vint64m2_t src, size_t vl) {
  return vfncvt_f_x_w_f32m1_m(mask, maskedoff, src, vl);
}

// CHECK-RV32-LABEL: @test_vfncvt_f_x_w_f32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv4f32.nxv4i64.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv4f32.nxv4i64.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfncvt_f_x_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
                                       vint64m4_t src, size_t vl) {
  return vfncvt_f_x_w_f32m2_m(mask, maskedoff, src, vl);
}

// CHECK-RV32-LABEL: @test_vfncvt_f_x_w_f32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv8f32.nxv8i64.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv8f32.nxv8i64.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfncvt_f_x_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
                                       vint64m8_t src, size_t vl) {
  return vfncvt_f_x_w_f32m4_m(mask, maskedoff, src, vl);
}

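// vfncvt_f_xu_w is the unsigned-source counterpart, converting u64 to f32.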
// CHECK-RV32-LABEL: @test_vfncvt_f_xu_w_f32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f32.nxv1i64.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_m(vbool64_t mask,
                                          vfloat32mf2_t maskedoff,
                                          vuint64m1_t src, size_t vl) {
  return vfncvt_f_xu_w_f32mf2_m(mask, maskedoff, src, vl);
}

// CHECK-RV32-LABEL: @test_vfncvt_f_xu_w_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfncvt_f_xu_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
                                        vuint64m2_t src, size_t vl) {
  return vfncvt_f_xu_w_f32m1_m(mask, maskedoff, src, vl);
}

// CHECK-RV32-LABEL: @test_vfncvt_f_xu_w_f32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfncvt_f_xu_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
                                        vuint64m4_t src, size_t vl) {
  return vfncvt_f_xu_w_f32m2_m(mask, maskedoff, src, vl);
}

// CHECK-RV32-LABEL: @test_vfncvt_f_xu_w_f32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfncvt_f_xu_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
                                        vuint64m8_t src, size_t vl) {
  return vfncvt_f_xu_w_f32m4_m(mask, maskedoff, src, vl);
}

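// vfncvt_f_f_w narrows f64 to f32 using the dynamic rounding mode, while the
// interleaved _rod_ tests below check the round-toward-odd form.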
// CHECK-RV32-LABEL: @test_vfncvt_f_f_w_f32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv1f32.nxv1f64.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv1f32.nxv1f64.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_m(vbool64_t mask,
                                         vfloat32mf2_t maskedoff,
                                         vfloat64m1_t src, size_t vl) {
  return vfncvt_f_f_w_f32mf2_m(mask, maskedoff, src, vl);
}

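// Per the RVV spec, vfncvt.rod.f.f.w rounds toward odd ("jamming"), which
// avoids double-rounding error when narrowing in two steps (e.g. f64 -> f32
// -> f16).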
// CHECK-RV32-LABEL: @test_vfncvt_rod_f_f_w_f32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_m(vbool64_t mask,
                                             vfloat32mf2_t maskedoff,
                                             vfloat64m1_t src, size_t vl) {
  return vfncvt_rod_f_f_w_f32mf2_m(mask, maskedoff, src, vl);
}

// CHECK-RV32-LABEL: @test_vfncvt_f_f_w_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv2f32.nxv2f64.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv2f32.nxv2f64.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfncvt_f_f_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
                                       vfloat64m2_t src, size_t vl) {
  return vfncvt_f_f_w_f32m1_m(mask, maskedoff, src, vl);
}

// CHECK-RV32-LABEL: @test_vfncvt_rod_f_f_w_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_m(vbool32_t mask,
                                           vfloat32m1_t maskedoff,
                                           vfloat64m2_t src, size_t vl) {
  return vfncvt_rod_f_f_w_f32m1_m(mask, maskedoff, src, vl);
}

// CHECK-RV32-LABEL: @test_vfncvt_f_f_w_f32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv4f32.nxv4f64.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv4f32.nxv4f64.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfncvt_f_f_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
                                       vfloat64m4_t src, size_t vl) {
  return vfncvt_f_f_w_f32m2_m(mask, maskedoff, src, vl);
}

// CHECK-RV32-LABEL: @test_vfncvt_rod_f_f_w_f32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_m(vbool16_t mask,
                                           vfloat32m2_t maskedoff,
                                           vfloat64m4_t src, size_t vl) {
  return vfncvt_rod_f_f_w_f32m2_m(mask, maskedoff, src, vl);
}

// CHECK-RV32-LABEL: @test_vfncvt_f_f_w_f32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv8f32.nxv8f64.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv8f32.nxv8f64.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfncvt_f_f_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
                                       vfloat64m8_t src, size_t vl) {
  return vfncvt_f_f_w_f32m4_m(mask, maskedoff, src, vl);
}

// CHECK-RV32-LABEL: @test_vfncvt_rod_f_f_w_f32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_m(vbool8_t mask,
                                           vfloat32m4_t maskedoff,
                                           vfloat64m8_t src, size_t vl) {
  return vfncvt_rod_f_f_w_f32m4_m(mask, maskedoff, src, vl);
}