[RISCV][Clang] Add RVV miscellaneous intrinsic functions.

1. vreinterpret
2. vundefined
3. LMUL truncation and extension.
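
The truncation/extension builtins lower to the
llvm.experimental.vector.extract/insert intrinsics, vreinterpret lowers to a
plain bitcast, and vundefined yields an undef value. A minimal usage sketch
from C (the vlmul_ext name below is exercised by the new test; the
vreinterpret, vlmul_trunc, and vundefined names are illustrative, following
the same naming scheme):

  #include <riscv_vector.h>

  // Reinterpret between types with the same SEW and LMUL (a bitcast).
  vuint32m1_t cast_bits(vint32m1_t v) {
    return vreinterpret_v_i32m1_u32m1(v);
  }

  // LMUL extension: insert v at index 0 of an undefined m2 register group.
  vint32m2_t widen_group(vint32m1_t v) {
    return vlmul_ext_v_i32m1_i32m2(v);
  }

  // LMUL truncation: extract the low m1 part of an m2 register group.
  vint32m1_t narrow_group(vint32m2_t v) {
    return vlmul_trunc_v_i32m2_i32m1(v);
  }

  // An explicitly undefined vector value.
  vint32m1_t make_undef(void) {
    return vundefined_i32m1();
  }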

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D100391

GitOrigin-RevId: 8f683366afcfd7c03faea615d1529a0014d7dbde
diff --git a/include/clang/Basic/riscv_vector.td b/include/clang/Basic/riscv_vector.td
index 8bb33e6..2d94f11 100644
--- a/include/clang/Basic/riscv_vector.td
+++ b/include/clang/Basic/riscv_vector.td
@@ -92,6 +92,17 @@
 //      (SEW=16, LMUL=4) and Log2EEW is 3 (EEW=8), and then equivalent vector
 //      type is __rvv_uint8m2_t (elmul=(8/16)*4 = 2). Ignore to define a new
 //      builtins if its equivalent type has illegal lmul.
+//   (FixedSEW:Value): Given a vector type (SEW and LMUL), computes another
+//      vector type which only changes the SEW to the given value. Ignore to
+//      define a new builtin if its equivalent type has illegal lmul or the
+//      SEW does not change.
+//   (SFixedLog2LMUL:Value): Smaller Fixed Log2LMUL. Given a vector type (SEW
+//      and LMUL), computes another vector type which only changes the LMUL to
+//      the given value. The new LMUL must be smaller than the old one. Ignore
+//      to define a new builtin if its equivalent type has illegal lmul.
+//   (LFixedLog2LMUL:Value): Larger Fixed Log2LMUL. Given a vector type (SEW
+//      and LMUL), computes another vector type which only changes the LMUL to
+//      the given value. The new LMUL must be larger than the old one. Ignore
+//      to define a new builtin if its equivalent type has illegal lmul.
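+//      For example, given __rvv_int16m4_t (SEW=16, LMUL=4), (FixedSEW:32)
+//      yields __rvv_int32m4_t (SEW=32, LMUL=4 unchanged), while
+//      (SFixedLog2LMUL:0) yields __rvv_int16m1_t (SEW=16 unchanged, LMUL=1).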
 //
 // Following with the example above, if t is "i", then "Ue" will yield unsigned
 // int and "Fv" will yield __rvv_float32m1_t (again assuming LMUL=1), Fw would
@@ -1272,3 +1283,69 @@
   defm vcompress : RVVOutBuiltinSet<"vcompress", "csil",
                                     [["vm", "Uv", "UvUvUvm"]]>;
 }
+
+// Miscellaneous
+let HasMask = false, HasVL = false, HasNoMaskedOverloaded = false,
+    IRName = "" in {
+  let Name = "vreinterpret_v",
+      ManualCodegen = [{
+        return Builder.CreateBitCast(Ops[0], ResultType);
+      }] in {
+    // Reinterpret between different types under the same SEW and LMUL
+    def vreinterpret_i_u : RVVBuiltin<"Uvv", "vUv", "csil">;
+    def vreinterpret_i_f : RVVBuiltin<"Fvv", "vFv", "il">;
+    def vreinterpret_u_i : RVVBuiltin<"vUv", "Uvv", "csil">;
+    def vreinterpret_u_f : RVVBuiltin<"FvUv", "UvFv", "il">;
+    def vreinterpret_f_i : RVVBuiltin<"vFv", "Fvv", "il">;
+    def vreinterpret_f_u : RVVBuiltin<"UvFv", "FvUv", "il">;
+
+    // Reinterpret between different SEW under the same LMUL
+    foreach dst_sew = ["(FixedSEW:8)", "(FixedSEW:16)", "(FixedSEW:32)",
+                       "(FixedSEW:64)"] in {
+      def vreinterpret_i_ # dst_sew : RVVBuiltin<"v" # dst_sew # "v", dst_sew # "vv", "csil">;
+      def vreinterpret_u_ # dst_sew : RVVBuiltin<"Uv" # dst_sew # "Uv", dst_sew # "UvUv", "csil">;
+    }
+  }
+
+  let Name = "vundefined",
+      ManualCodegen = [{
+        return llvm::UndefValue::get(ResultType);
+      }] in {
+    def vundefined : RVVBuiltin<"v", "v", "csilfd">;
+    def vundefined_u : RVVBuiltin<"Uv", "Uv", "csil">;
+  }
+
+  // LMUL truncation
+  // C/C++ Operand: VecTy, IR Operand: VecTy, Index
+  let Name = "vlmul_trunc_v",
+      ManualCodegen = [{ {
+        ID = Intrinsic::experimental_vector_extract;
+        IntrinsicTypes = {ResultType, Ops[0]->getType()};
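+        // vector.extract takes (Vec, Idx); extracting at index 0 yields the
+        // low part of the register group.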
+        Ops.push_back(ConstantInt::get(Int64Ty, 0));
+        return Builder.CreateCall(CGM.getIntrinsic(ID, IntrinsicTypes), Ops, "");
+      } }] in {
+    foreach dst_lmul = ["(SFixedLog2LMUL:-3)", "(SFixedLog2LMUL:-2)", "(SFixedLog2LMUL:-1)",
+                        "(SFixedLog2LMUL:0)", "(SFixedLog2LMUL:1)", "(SFixedLog2LMUL:2)"] in {
+      def vlmul_trunc # dst_lmul : RVVBuiltin<"v" # dst_lmul # "v", dst_lmul # "vv", "csilfd">;
+      def vlmul_trunc_u # dst_lmul : RVVBuiltin<"Uv" # dst_lmul # "Uv", dst_lmul # "UvUv", "csil">;
+    }
+  }
+
+  // LMUL extension
+  // C/C++ Operand: SubVecTy, IR Operand: VecTy, SubVecTy, Index
+  let Name = "vlmul_ext_v",
+      ManualCodegen = [{
+        ID = Intrinsic::experimental_vector_insert;
+        IntrinsicTypes = {ResultType, Ops[0]->getType()};
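+        // Ops starts as {SubVec}. Append the undef destination, swap to get
+        // {Vec, SubVec}, then append the index so the operands match
+        // vector.insert's (Vec, SubVec, Idx) order.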
+        Ops.push_back(llvm::UndefValue::get(ResultType));
+        std::swap(Ops[0], Ops[1]);
+        Ops.push_back(ConstantInt::get(Int64Ty, 0));
+        return Builder.CreateCall(CGM.getIntrinsic(ID, IntrinsicTypes), Ops, "");
+      }] in {
+    foreach dst_lmul = ["(LFixedLog2LMUL:-2)", "(LFixedLog2LMUL:-1)", "(LFixedLog2LMUL:-0)",
+                        "(LFixedLog2LMUL:1)", "(LFixedLog2LMUL:2)", "(LFixedLog2LMUL:3)"] in {
+      def vlmul_ext # dst_lmul : RVVBuiltin<"v" # dst_lmul # "v", dst_lmul # "vv", "csilfd">;
+      def vlmul_ext_u # dst_lmul : RVVBuiltin<"Uv" # dst_lmul # "Uv", dst_lmul # "UvUv", "csil">;
+    }
+  }
+}
diff --git a/test/CodeGen/RISCV/rvv-intrinsics/vlmul.c b/test/CodeGen/RISCV/rvv-intrinsics/vlmul.c
new file mode 100644
index 0000000..6d1e055
--- /dev/null
+++ b/test/CodeGen/RISCV/rvv-intrinsics/vlmul.c
@@ -0,0 +1,3368 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_i8mf8_i8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.insert.nxv2i8.nxv1i8(<vscale x 2 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.insert.nxv2i8.nxv1i8(<vscale x 2 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vlmul_ext_v_i8mf8_i8mf4(vint8mf8_t op1) {
+  return vlmul_ext_v_i8mf8_i8mf4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_i8mf8_i8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.experimental.vector.insert.nxv4i8.nxv1i8(<vscale x 4 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.experimental.vector.insert.nxv4i8.nxv1i8(<vscale x 4 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vlmul_ext_v_i8mf8_i8mf2(vint8mf8_t op1) {
+  return vlmul_ext_v_i8mf8_i8mf2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_i8mf8_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.insert.nxv8i8.nxv1i8(<vscale x 8 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.insert.nxv8i8.nxv1i8(<vscale x 8 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vlmul_ext_v_i8mf8_i8m1(vint8mf8_t op1) {
+  return vlmul_ext_v_i8mf8_i8m1(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_i8mf8_i8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.nxv1i8(<vscale x 16 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.nxv1i8(<vscale x 16 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vlmul_ext_v_i8mf8_i8m2(vint8mf8_t op1) {
+  return vlmul_ext_v_i8mf8_i8m2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_i8mf8_i8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv1i8(<vscale x 32 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv1i8(<vscale x 32 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vlmul_ext_v_i8mf8_i8m4(vint8mf8_t op1) {
+  return vlmul_ext_v_i8mf8_i8m4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_i8mf8_i8m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv1i8(<vscale x 64 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv1i8(<vscale x 64 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vlmul_ext_v_i8mf8_i8m8(vint8mf8_t op1) {
+  return vlmul_ext_v_i8mf8_i8m8(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_i8mf4_i8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.experimental.vector.insert.nxv4i8.nxv2i8(<vscale x 4 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.experimental.vector.insert.nxv4i8.nxv2i8(<vscale x 4 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vlmul_ext_v_i8mf4_i8mf2(vint8mf4_t op1) {
+  return vlmul_ext_v_i8mf4_i8mf2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_i8mf4_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.insert.nxv8i8.nxv2i8(<vscale x 8 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.insert.nxv8i8.nxv2i8(<vscale x 8 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vlmul_ext_v_i8mf4_i8m1(vint8mf4_t op1) {
+  return vlmul_ext_v_i8mf4_i8m1(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_i8mf4_i8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.nxv2i8(<vscale x 16 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.nxv2i8(<vscale x 16 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vlmul_ext_v_i8mf4_i8m2(vint8mf4_t op1) {
+  return vlmul_ext_v_i8mf4_i8m2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_i8mf4_i8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv2i8(<vscale x 32 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv2i8(<vscale x 32 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vlmul_ext_v_i8mf4_i8m4(vint8mf4_t op1) {
+  return vlmul_ext_v_i8mf4_i8m4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_i8mf4_i8m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv2i8(<vscale x 64 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv2i8(<vscale x 64 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vlmul_ext_v_i8mf4_i8m8(vint8mf4_t op1) {
+  return vlmul_ext_v_i8mf4_i8m8(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_i8mf2_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.insert.nxv8i8.nxv4i8(<vscale x 8 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf2_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.insert.nxv8i8.nxv4i8(<vscale x 8 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vlmul_ext_v_i8mf2_i8m1(vint8mf2_t op1) {
+  return vlmul_ext_v_i8mf2_i8m1(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_i8mf2_i8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.nxv4i8(<vscale x 16 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf2_i8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.nxv4i8(<vscale x 16 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vlmul_ext_v_i8mf2_i8m2(vint8mf2_t op1) {
+  return vlmul_ext_v_i8mf2_i8m2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_i8mf2_i8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv4i8(<vscale x 32 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf2_i8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv4i8(<vscale x 32 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vlmul_ext_v_i8mf2_i8m4(vint8mf2_t op1) {
+  return vlmul_ext_v_i8mf2_i8m4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_i8mf2_i8m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv4i8(<vscale x 64 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf2_i8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv4i8(<vscale x 64 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vlmul_ext_v_i8mf2_i8m8(vint8mf2_t op1) {
+  return vlmul_ext_v_i8mf2_i8m8(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_i8m1_i8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.nxv8i8(<vscale x 16 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m1_i8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.nxv8i8(<vscale x 16 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vlmul_ext_v_i8m1_i8m2(vint8m1_t op1) {
+  return vlmul_ext_v_i8m1_i8m2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_i8m1_i8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv8i8(<vscale x 32 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m1_i8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv8i8(<vscale x 32 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vlmul_ext_v_i8m1_i8m4(vint8m1_t op1) {
+  return vlmul_ext_v_i8m1_i8m4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_i8m1_i8m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv8i8(<vscale x 64 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m1_i8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv8i8(<vscale x 64 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vlmul_ext_v_i8m1_i8m8(vint8m1_t op1) {
+  return vlmul_ext_v_i8m1_i8m8(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_i8m2_i8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m2_i8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vlmul_ext_v_i8m2_i8m4(vint8m2_t op1) {
+  return vlmul_ext_v_i8m2_i8m4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_i8m2_i8m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m2_i8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vlmul_ext_v_i8m2_i8m8(vint8m2_t op1) {
+  return vlmul_ext_v_i8m2_i8m8(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_i8m4_i8m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv32i8(<vscale x 64 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m4_i8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv32i8(<vscale x 64 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vlmul_ext_v_i8m4_i8m8(vint8m4_t op1) {
+  return vlmul_ext_v_i8m4_i8m8(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_i16mf4_i16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.experimental.vector.insert.nxv2i16.nxv1i16(<vscale x 2 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.experimental.vector.insert.nxv2i16.nxv1i16(<vscale x 2 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vlmul_ext_v_i16mf4_i16mf2(vint16mf4_t op1) {
+  return vlmul_ext_v_i16mf4_i16mf2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_i16mf4_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.insert.nxv4i16.nxv1i16(<vscale x 4 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.insert.nxv4i16.nxv1i16(<vscale x 4 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vlmul_ext_v_i16mf4_i16m1(vint16mf4_t op1) {
+  return vlmul_ext_v_i16mf4_i16m1(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_i16mf4_i16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.nxv1i16(<vscale x 8 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.nxv1i16(<vscale x 8 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vlmul_ext_v_i16mf4_i16m2(vint16mf4_t op1) {
+  return vlmul_ext_v_i16mf4_i16m2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_i16mf4_i16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv1i16(<vscale x 16 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv1i16(<vscale x 16 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vlmul_ext_v_i16mf4_i16m4(vint16mf4_t op1) {
+  return vlmul_ext_v_i16mf4_i16m4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_i16mf4_i16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv1i16(<vscale x 32 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv1i16(<vscale x 32 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vlmul_ext_v_i16mf4_i16m8(vint16mf4_t op1) {
+  return vlmul_ext_v_i16mf4_i16m8(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_i16mf2_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.insert.nxv4i16.nxv2i16(<vscale x 4 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf2_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.insert.nxv4i16.nxv2i16(<vscale x 4 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vlmul_ext_v_i16mf2_i16m1(vint16mf2_t op1) {
+  return vlmul_ext_v_i16mf2_i16m1(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_i16mf2_i16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.nxv2i16(<vscale x 8 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf2_i16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.nxv2i16(<vscale x 8 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vlmul_ext_v_i16mf2_i16m2(vint16mf2_t op1) {
+  return vlmul_ext_v_i16mf2_i16m2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_i16mf2_i16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv2i16(<vscale x 16 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf2_i16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv2i16(<vscale x 16 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vlmul_ext_v_i16mf2_i16m4(vint16mf2_t op1) {
+  return vlmul_ext_v_i16mf2_i16m4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_i16mf2_i16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv2i16(<vscale x 32 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf2_i16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv2i16(<vscale x 32 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vlmul_ext_v_i16mf2_i16m8(vint16mf2_t op1) {
+  return vlmul_ext_v_i16mf2_i16m8(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_i16m1_i16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.nxv4i16(<vscale x 8 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m1_i16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.nxv4i16(<vscale x 8 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vlmul_ext_v_i16m1_i16m2(vint16m1_t op1) {
+  return vlmul_ext_v_i16m1_i16m2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_i16m1_i16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv4i16(<vscale x 16 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m1_i16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv4i16(<vscale x 16 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vlmul_ext_v_i16m1_i16m4(vint16m1_t op1) {
+  return vlmul_ext_v_i16m1_i16m4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_i16m1_i16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv4i16(<vscale x 32 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m1_i16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv4i16(<vscale x 32 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vlmul_ext_v_i16m1_i16m8(vint16m1_t op1) {
+  return vlmul_ext_v_i16m1_i16m8(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_i16m2_i16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m2_i16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vlmul_ext_v_i16m2_i16m4(vint16m2_t op1) {
+  return vlmul_ext_v_i16m2_i16m4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_i16m2_i16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv8i16(<vscale x 32 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m2_i16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv8i16(<vscale x 32 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vlmul_ext_v_i16m2_i16m8(vint16m2_t op1) {
+  return vlmul_ext_v_i16m2_i16m8(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_i16m4_i16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv16i16(<vscale x 32 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m4_i16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv16i16(<vscale x 32 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vlmul_ext_v_i16m4_i16m8(vint16m4_t op1) {
+  return vlmul_ext_v_i16m4_i16m8(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_i32mf2_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.insert.nxv2i32.nxv1i32(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32mf2_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.insert.nxv2i32.nxv1i32(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vlmul_ext_v_i32mf2_i32m1(vint32mf2_t op1) {
+  return vlmul_ext_v_i32mf2_i32m1(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_i32mf2_i32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.nxv1i32(<vscale x 4 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32mf2_i32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.nxv1i32(<vscale x 4 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vlmul_ext_v_i32mf2_i32m2(vint32mf2_t op1) {
+  return vlmul_ext_v_i32mf2_i32m2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_i32mf2_i32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv8i32.nxv1i32(<vscale x 8 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32mf2_i32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv8i32.nxv1i32(<vscale x 8 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vlmul_ext_v_i32mf2_i32m4(vint32mf2_t op1) {
+  return vlmul_ext_v_i32mf2_i32m4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_i32mf2_i32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv1i32(<vscale x 16 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32mf2_i32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv1i32(<vscale x 16 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vlmul_ext_v_i32mf2_i32m8(vint32mf2_t op1) {
+  return vlmul_ext_v_i32mf2_i32m8(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_i32m1_i32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.nxv2i32(<vscale x 4 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m1_i32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.nxv2i32(<vscale x 4 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vlmul_ext_v_i32m1_i32m2(vint32m1_t op1) {
+  return vlmul_ext_v_i32m1_i32m2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_i32m1_i32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv8i32.nxv2i32(<vscale x 8 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m1_i32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv8i32.nxv2i32(<vscale x 8 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vlmul_ext_v_i32m1_i32m4(vint32m1_t op1) {
+  return vlmul_ext_v_i32m1_i32m4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_i32m1_i32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv2i32(<vscale x 16 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m1_i32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv2i32(<vscale x 16 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vlmul_ext_v_i32m1_i32m8(vint32m1_t op1) {
+  return vlmul_ext_v_i32m1_i32m8(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_i32m2_i32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m2_i32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vlmul_ext_v_i32m2_i32m4(vint32m2_t op1) {
+  return vlmul_ext_v_i32m2_i32m4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_i32m2_i32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m2_i32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vlmul_ext_v_i32m2_i32m8(vint32m2_t op1) {
+  return vlmul_ext_v_i32m2_i32m8(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_i32m4_i32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv8i32(<vscale x 16 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m4_i32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv8i32(<vscale x 16 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vlmul_ext_v_i32m4_i32m8(vint32m4_t op1) {
+  return vlmul_ext_v_i32m4_i32m8(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_i64m1_i64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.nxv1i64(<vscale x 2 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m1_i64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.nxv1i64(<vscale x 2 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vlmul_ext_v_i64m1_i64m2(vint64m1_t op1) {
+  return vlmul_ext_v_i64m1_i64m2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_i64m1_i64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.insert.nxv4i64.nxv1i64(<vscale x 4 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m1_i64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.insert.nxv4i64.nxv1i64(<vscale x 4 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vlmul_ext_v_i64m1_i64m4(vint64m1_t op1) {
+  return vlmul_ext_v_i64m1_i64m4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_i64m1_i64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv1i64(<vscale x 8 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m1_i64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv1i64(<vscale x 8 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vlmul_ext_v_i64m1_i64m8(vint64m1_t op1) {
+  return vlmul_ext_v_i64m1_i64m8(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_i64m2_i64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.insert.nxv4i64.nxv2i64(<vscale x 4 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m2_i64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.insert.nxv4i64.nxv2i64(<vscale x 4 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vlmul_ext_v_i64m2_i64m4(vint64m2_t op1) {
+  return vlmul_ext_v_i64m2_i64m4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_i64m2_i64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv2i64(<vscale x 8 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m2_i64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv2i64(<vscale x 8 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vlmul_ext_v_i64m2_i64m8(vint64m2_t op1) {
+  return vlmul_ext_v_i64m2_i64m8(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_i64m4_i64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv4i64(<vscale x 8 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m4_i64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv4i64(<vscale x 8 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vlmul_ext_v_i64m4_i64m8(vint64m4_t op1) {
+  return vlmul_ext_v_i64m4_i64m8(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_u8mf8_u8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.insert.nxv2i8.nxv1i8(<vscale x 2 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.insert.nxv2i8.nxv1i8(<vscale x 2 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vlmul_ext_v_u8mf8_u8mf4(vuint8mf8_t op1) {
+  return vlmul_ext_v_u8mf8_u8mf4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_u8mf8_u8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.experimental.vector.insert.nxv4i8.nxv1i8(<vscale x 4 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.experimental.vector.insert.nxv4i8.nxv1i8(<vscale x 4 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vlmul_ext_v_u8mf8_u8mf2(vuint8mf8_t op1) {
+  return vlmul_ext_v_u8mf8_u8mf2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_u8mf8_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.insert.nxv8i8.nxv1i8(<vscale x 8 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.insert.nxv8i8.nxv1i8(<vscale x 8 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vlmul_ext_v_u8mf8_u8m1(vuint8mf8_t op1) {
+  return vlmul_ext_v_u8mf8_u8m1(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_u8mf8_u8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.nxv1i8(<vscale x 16 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.nxv1i8(<vscale x 16 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vlmul_ext_v_u8mf8_u8m2(vuint8mf8_t op1) {
+  return vlmul_ext_v_u8mf8_u8m2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_u8mf8_u8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv1i8(<vscale x 32 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv1i8(<vscale x 32 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vlmul_ext_v_u8mf8_u8m4(vuint8mf8_t op1) {
+  return vlmul_ext_v_u8mf8_u8m4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_u8mf8_u8m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv1i8(<vscale x 64 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv1i8(<vscale x 64 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vlmul_ext_v_u8mf8_u8m8(vuint8mf8_t op1) {
+  return vlmul_ext_v_u8mf8_u8m8(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_u8mf4_u8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.experimental.vector.insert.nxv4i8.nxv2i8(<vscale x 4 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.experimental.vector.insert.nxv4i8.nxv2i8(<vscale x 4 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vlmul_ext_v_u8mf4_u8mf2(vuint8mf4_t op1) {
+  return vlmul_ext_v_u8mf4_u8mf2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_u8mf4_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.insert.nxv8i8.nxv2i8(<vscale x 8 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.insert.nxv8i8.nxv2i8(<vscale x 8 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vlmul_ext_v_u8mf4_u8m1(vuint8mf4_t op1) {
+  return vlmul_ext_v_u8mf4_u8m1(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_u8mf4_u8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.nxv2i8(<vscale x 16 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.nxv2i8(<vscale x 16 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vlmul_ext_v_u8mf4_u8m2(vuint8mf4_t op1) {
+  return vlmul_ext_v_u8mf4_u8m2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_u8mf4_u8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv2i8(<vscale x 32 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv2i8(<vscale x 32 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vlmul_ext_v_u8mf4_u8m4(vuint8mf4_t op1) {
+  return vlmul_ext_v_u8mf4_u8m4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_u8mf4_u8m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv2i8(<vscale x 64 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv2i8(<vscale x 64 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vlmul_ext_v_u8mf4_u8m8(vuint8mf4_t op1) {
+  return vlmul_ext_v_u8mf4_u8m8(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_u8mf2_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.insert.nxv8i8.nxv4i8(<vscale x 8 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf2_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.insert.nxv8i8.nxv4i8(<vscale x 8 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vlmul_ext_v_u8mf2_u8m1(vuint8mf2_t op1) {
+  return vlmul_ext_v_u8mf2_u8m1(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_u8mf2_u8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.nxv4i8(<vscale x 16 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf2_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.nxv4i8(<vscale x 16 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vlmul_ext_v_u8mf2_u8m2(vuint8mf2_t op1) {
+  return vlmul_ext_v_u8mf2_u8m2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_u8mf2_u8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv4i8(<vscale x 32 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf2_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv4i8(<vscale x 32 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vlmul_ext_v_u8mf2_u8m4(vuint8mf2_t op1) {
+  return vlmul_ext_v_u8mf2_u8m4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_u8mf2_u8m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv4i8(<vscale x 64 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf2_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv4i8(<vscale x 64 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vlmul_ext_v_u8mf2_u8m8(vuint8mf2_t op1) {
+  return vlmul_ext_v_u8mf2_u8m8(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_u8m1_u8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.nxv8i8(<vscale x 16 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m1_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.nxv8i8(<vscale x 16 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vlmul_ext_v_u8m1_u8m2(vuint8m1_t op1) {
+  return vlmul_ext_v_u8m1_u8m2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_u8m1_u8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv8i8(<vscale x 32 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m1_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv8i8(<vscale x 32 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vlmul_ext_v_u8m1_u8m4(vuint8m1_t op1) {
+  return vlmul_ext_v_u8m1_u8m4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_u8m1_u8m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv8i8(<vscale x 64 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m1_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv8i8(<vscale x 64 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vlmul_ext_v_u8m1_u8m8(vuint8m1_t op1) {
+  return vlmul_ext_v_u8m1_u8m8(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_u8m2_u8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m2_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vlmul_ext_v_u8m2_u8m4(vuint8m2_t op1) {
+  return vlmul_ext_v_u8m2_u8m4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_u8m2_u8m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m2_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vlmul_ext_v_u8m2_u8m8(vuint8m2_t op1) {
+  return vlmul_ext_v_u8m2_u8m8(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_u8m4_u8m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv32i8(<vscale x 64 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m4_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv32i8(<vscale x 64 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vlmul_ext_v_u8m4_u8m8(vuint8m4_t op1) {
+  return vlmul_ext_v_u8m4_u8m8(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_u16mf4_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.experimental.vector.insert.nxv2i16.nxv1i16(<vscale x 2 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.experimental.vector.insert.nxv2i16.nxv1i16(<vscale x 2 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vlmul_ext_v_u16mf4_u16mf2(vuint16mf4_t op1) {
+  return vlmul_ext_v_u16mf4_u16mf2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_u16mf4_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.insert.nxv4i16.nxv1i16(<vscale x 4 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.insert.nxv4i16.nxv1i16(<vscale x 4 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vlmul_ext_v_u16mf4_u16m1(vuint16mf4_t op1) {
+  return vlmul_ext_v_u16mf4_u16m1(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_u16mf4_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.nxv1i16(<vscale x 8 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.nxv1i16(<vscale x 8 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vlmul_ext_v_u16mf4_u16m2(vuint16mf4_t op1) {
+  return vlmul_ext_v_u16mf4_u16m2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_u16mf4_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv1i16(<vscale x 16 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv1i16(<vscale x 16 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vlmul_ext_v_u16mf4_u16m4(vuint16mf4_t op1) {
+  return vlmul_ext_v_u16mf4_u16m4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_u16mf4_u16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv1i16(<vscale x 32 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv1i16(<vscale x 32 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vlmul_ext_v_u16mf4_u16m8(vuint16mf4_t op1) {
+  return vlmul_ext_v_u16mf4_u16m8(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_u16mf2_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.insert.nxv4i16.nxv2i16(<vscale x 4 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf2_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.insert.nxv4i16.nxv2i16(<vscale x 4 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vlmul_ext_v_u16mf2_u16m1(vuint16mf2_t op1) {
+  return vlmul_ext_v_u16mf2_u16m1(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_u16mf2_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.nxv2i16(<vscale x 8 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf2_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.nxv2i16(<vscale x 8 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vlmul_ext_v_u16mf2_u16m2(vuint16mf2_t op1) {
+  return vlmul_ext_v_u16mf2_u16m2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_u16mf2_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv2i16(<vscale x 16 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf2_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv2i16(<vscale x 16 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vlmul_ext_v_u16mf2_u16m4(vuint16mf2_t op1) {
+  return vlmul_ext_v_u16mf2_u16m4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_u16mf2_u16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv2i16(<vscale x 32 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf2_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv2i16(<vscale x 32 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vlmul_ext_v_u16mf2_u16m8(vuint16mf2_t op1) {
+  return vlmul_ext_v_u16mf2_u16m8(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_u16m1_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.nxv4i16(<vscale x 8 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m1_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.nxv4i16(<vscale x 8 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vlmul_ext_v_u16m1_u16m2(vuint16m1_t op1) {
+  return vlmul_ext_v_u16m1_u16m2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_u16m1_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv4i16(<vscale x 16 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m1_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv4i16(<vscale x 16 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vlmul_ext_v_u16m1_u16m4(vuint16m1_t op1) {
+  return vlmul_ext_v_u16m1_u16m4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_u16m1_u16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv4i16(<vscale x 32 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m1_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv4i16(<vscale x 32 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vlmul_ext_v_u16m1_u16m8(vuint16m1_t op1) {
+  return vlmul_ext_v_u16m1_u16m8(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_u16m2_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m2_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vlmul_ext_v_u16m2_u16m4(vuint16m2_t op1) {
+  return vlmul_ext_v_u16m2_u16m4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_u16m2_u16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv8i16(<vscale x 32 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m2_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv8i16(<vscale x 32 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vlmul_ext_v_u16m2_u16m8(vuint16m2_t op1) {
+  return vlmul_ext_v_u16m2_u16m8(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_u16m4_u16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv16i16(<vscale x 32 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m4_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv16i16(<vscale x 32 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vlmul_ext_v_u16m4_u16m8(vuint16m4_t op1) {
+  return vlmul_ext_v_u16m4_u16m8(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_u32mf2_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.insert.nxv2i32.nxv1i32(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32mf2_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.insert.nxv2i32.nxv1i32(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vlmul_ext_v_u32mf2_u32m1(vuint32mf2_t op1) {
+  return vlmul_ext_v_u32mf2_u32m1(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_u32mf2_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.nxv1i32(<vscale x 4 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32mf2_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.nxv1i32(<vscale x 4 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vlmul_ext_v_u32mf2_u32m2(vuint32mf2_t op1) {
+  return vlmul_ext_v_u32mf2_u32m2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_u32mf2_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv8i32.nxv1i32(<vscale x 8 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32mf2_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv8i32.nxv1i32(<vscale x 8 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vlmul_ext_v_u32mf2_u32m4(vuint32mf2_t op1) {
+  return vlmul_ext_v_u32mf2_u32m4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_u32mf2_u32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv1i32(<vscale x 16 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32mf2_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv1i32(<vscale x 16 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vlmul_ext_v_u32mf2_u32m8(vuint32mf2_t op1) {
+  return vlmul_ext_v_u32mf2_u32m8(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_u32m1_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.nxv2i32(<vscale x 4 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m1_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.nxv2i32(<vscale x 4 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vlmul_ext_v_u32m1_u32m2(vuint32m1_t op1) {
+  return vlmul_ext_v_u32m1_u32m2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_u32m1_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv8i32.nxv2i32(<vscale x 8 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m1_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv8i32.nxv2i32(<vscale x 8 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vlmul_ext_v_u32m1_u32m4(vuint32m1_t op1) {
+  return vlmul_ext_v_u32m1_u32m4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_u32m1_u32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv2i32(<vscale x 16 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m1_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv2i32(<vscale x 16 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vlmul_ext_v_u32m1_u32m8(vuint32m1_t op1) {
+  return vlmul_ext_v_u32m1_u32m8(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_u32m2_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m2_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vlmul_ext_v_u32m2_u32m4(vuint32m2_t op1) {
+  return vlmul_ext_v_u32m2_u32m4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_u32m2_u32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m2_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vlmul_ext_v_u32m2_u32m8(vuint32m2_t op1) {
+  return vlmul_ext_v_u32m2_u32m8(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_u32m4_u32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv8i32(<vscale x 16 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m4_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv8i32(<vscale x 16 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vlmul_ext_v_u32m4_u32m8(vuint32m4_t op1) {
+  return vlmul_ext_v_u32m4_u32m8(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_u64m1_u64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.nxv1i64(<vscale x 2 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m1_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.nxv1i64(<vscale x 2 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vlmul_ext_v_u64m1_u64m2(vuint64m1_t op1) {
+  return vlmul_ext_v_u64m1_u64m2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_u64m1_u64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.insert.nxv4i64.nxv1i64(<vscale x 4 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m1_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.insert.nxv4i64.nxv1i64(<vscale x 4 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vlmul_ext_v_u64m1_u64m4(vuint64m1_t op1) {
+  return vlmul_ext_v_u64m1_u64m4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_u64m1_u64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv1i64(<vscale x 8 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m1_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv1i64(<vscale x 8 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vlmul_ext_v_u64m1_u64m8(vuint64m1_t op1) {
+  return vlmul_ext_v_u64m1_u64m8(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_u64m2_u64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.insert.nxv4i64.nxv2i64(<vscale x 4 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m2_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.insert.nxv4i64.nxv2i64(<vscale x 4 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vlmul_ext_v_u64m2_u64m4(vuint64m2_t op1) {
+  return vlmul_ext_v_u64m2_u64m4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_u64m2_u64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv2i64(<vscale x 8 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m2_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv2i64(<vscale x 8 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vlmul_ext_v_u64m2_u64m8(vuint64m2_t op1) {
+  return vlmul_ext_v_u64m2_u64m8(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_u64m4_u64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv4i64(<vscale x 8 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m4_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv4i64(<vscale x 8 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vlmul_ext_v_u64m4_u64m8(vuint64m4_t op1) {
+  return vlmul_ext_v_u64m4_u64m8(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_f32mf2_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.experimental.vector.insert.nxv2f32.nxv1f32(<vscale x 2 x float> undef, <vscale x 1 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32mf2_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.experimental.vector.insert.nxv2f32.nxv1f32(<vscale x 2 x float> undef, <vscale x 1 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vlmul_ext_v_f32mf2_f32m1(vfloat32mf2_t op1) {
+  return vlmul_ext_v_f32mf2_f32m1(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_f32mf2_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.experimental.vector.insert.nxv4f32.nxv1f32(<vscale x 4 x float> undef, <vscale x 1 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32mf2_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.experimental.vector.insert.nxv4f32.nxv1f32(<vscale x 4 x float> undef, <vscale x 1 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vlmul_ext_v_f32mf2_f32m2(vfloat32mf2_t op1) {
+  return vlmul_ext_v_f32mf2_f32m2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_f32mf2_f32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.experimental.vector.insert.nxv8f32.nxv1f32(<vscale x 8 x float> undef, <vscale x 1 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32mf2_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.experimental.vector.insert.nxv8f32.nxv1f32(<vscale x 8 x float> undef, <vscale x 1 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vlmul_ext_v_f32mf2_f32m4(vfloat32mf2_t op1) {
+  return vlmul_ext_v_f32mf2_f32m4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_f32mf2_f32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.experimental.vector.insert.nxv16f32.nxv1f32(<vscale x 16 x float> undef, <vscale x 1 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32mf2_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.experimental.vector.insert.nxv16f32.nxv1f32(<vscale x 16 x float> undef, <vscale x 1 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vlmul_ext_v_f32mf2_f32m8(vfloat32mf2_t op1) {
+  return vlmul_ext_v_f32mf2_f32m8(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_f32m1_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.experimental.vector.insert.nxv4f32.nxv2f32(<vscale x 4 x float> undef, <vscale x 2 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m1_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.experimental.vector.insert.nxv4f32.nxv2f32(<vscale x 4 x float> undef, <vscale x 2 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vlmul_ext_v_f32m1_f32m2(vfloat32m1_t op1) {
+  return vlmul_ext_v_f32m1_f32m2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_f32m1_f32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.experimental.vector.insert.nxv8f32.nxv2f32(<vscale x 8 x float> undef, <vscale x 2 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m1_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.experimental.vector.insert.nxv8f32.nxv2f32(<vscale x 8 x float> undef, <vscale x 2 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vlmul_ext_v_f32m1_f32m4(vfloat32m1_t op1) {
+  return vlmul_ext_v_f32m1_f32m4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_f32m1_f32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.experimental.vector.insert.nxv16f32.nxv2f32(<vscale x 16 x float> undef, <vscale x 2 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m1_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.experimental.vector.insert.nxv16f32.nxv2f32(<vscale x 16 x float> undef, <vscale x 2 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vlmul_ext_v_f32m1_f32m8(vfloat32m1_t op1) {
+  return vlmul_ext_v_f32m1_f32m8(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_f32m2_f32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.experimental.vector.insert.nxv8f32.nxv4f32(<vscale x 8 x float> undef, <vscale x 4 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m2_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.experimental.vector.insert.nxv8f32.nxv4f32(<vscale x 8 x float> undef, <vscale x 4 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vlmul_ext_v_f32m2_f32m4(vfloat32m2_t op1) {
+  return vlmul_ext_v_f32m2_f32m4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_f32m2_f32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.experimental.vector.insert.nxv16f32.nxv4f32(<vscale x 16 x float> undef, <vscale x 4 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m2_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.experimental.vector.insert.nxv16f32.nxv4f32(<vscale x 16 x float> undef, <vscale x 4 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vlmul_ext_v_f32m2_f32m8(vfloat32m2_t op1) {
+  return vlmul_ext_v_f32m2_f32m8(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_f32m4_f32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.experimental.vector.insert.nxv16f32.nxv8f32(<vscale x 16 x float> undef, <vscale x 8 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m4_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.experimental.vector.insert.nxv16f32.nxv8f32(<vscale x 16 x float> undef, <vscale x 8 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vlmul_ext_v_f32m4_f32m8(vfloat32m4_t op1) {
+  return vlmul_ext_v_f32m4_f32m8(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_f64m1_f64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.nxv1f64(<vscale x 2 x double> undef, <vscale x 1 x double> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m1_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.nxv1f64(<vscale x 2 x double> undef, <vscale x 1 x double> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vlmul_ext_v_f64m1_f64m2(vfloat64m1_t op1) {
+  return vlmul_ext_v_f64m1_f64m2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_f64m1_f64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.experimental.vector.insert.nxv4f64.nxv1f64(<vscale x 4 x double> undef, <vscale x 1 x double> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m1_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.experimental.vector.insert.nxv4f64.nxv1f64(<vscale x 4 x double> undef, <vscale x 1 x double> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vlmul_ext_v_f64m1_f64m4(vfloat64m1_t op1) {
+  return vlmul_ext_v_f64m1_f64m4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_f64m1_f64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.experimental.vector.insert.nxv8f64.nxv1f64(<vscale x 8 x double> undef, <vscale x 1 x double> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m1_f64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.experimental.vector.insert.nxv8f64.nxv1f64(<vscale x 8 x double> undef, <vscale x 1 x double> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vlmul_ext_v_f64m1_f64m8(vfloat64m1_t op1) {
+  return vlmul_ext_v_f64m1_f64m8(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_f64m2_f64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.experimental.vector.insert.nxv4f64.nxv2f64(<vscale x 4 x double> undef, <vscale x 2 x double> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m2_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.experimental.vector.insert.nxv4f64.nxv2f64(<vscale x 4 x double> undef, <vscale x 2 x double> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vlmul_ext_v_f64m2_f64m4(vfloat64m2_t op1) {
+  return vlmul_ext_v_f64m2_f64m4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_f64m2_f64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.experimental.vector.insert.nxv8f64.nxv2f64(<vscale x 8 x double> undef, <vscale x 2 x double> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m2_f64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.experimental.vector.insert.nxv8f64.nxv2f64(<vscale x 8 x double> undef, <vscale x 2 x double> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vlmul_ext_v_f64m2_f64m8(vfloat64m2_t op1) {
+  return vlmul_ext_v_f64m2_f64m8(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_ext_v_f64m4_f64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.experimental.vector.insert.nxv8f64.nxv4f64(<vscale x 8 x double> undef, <vscale x 4 x double> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m4_f64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.experimental.vector.insert.nxv8f64.nxv4f64(<vscale x 8 x double> undef, <vscale x 4 x double> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vlmul_ext_v_f64m4_f64m8(vfloat64m4_t op1) {
+  return vlmul_ext_v_f64m4_f64m8(op1);
+}
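+
+// Usage sketch (comment only, not a checked test): these conversions pair
+// naturally, widening a value to a larger LMUL and later narrowing it back.
+// Both names below appear among the tests in this file:
+//   vuint8m2_t wide   = vlmul_ext_v_u8m1_u8m2(v);    // LMUL 1 -> 2, tail is undef
+//   vint8m1_t  narrow = vlmul_trunc_v_i8m2_i8m1(w);  // LMUL 2 -> 1, keeps low half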
+
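+// The vlmul_trunc_v_* tests below mirror the vlmul_ext_v_* tests above:
+// extension inserts the source into an undef vector of a larger LMUL via
+// llvm.experimental.vector.insert, while truncation extracts the low-order
+// elements into a smaller LMUL via llvm.experimental.vector.extract, both at
+// index 0.
+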
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i8mf4_i8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.experimental.vector.extract.nxv1i8.nxv2i8(<vscale x 2 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8mf4_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.experimental.vector.extract.nxv1i8.nxv2i8(<vscale x 2 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vlmul_trunc_v_i8mf4_i8mf8(vint8mf4_t op1) {
+  return vlmul_trunc_v_i8mf4_i8mf8(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i8mf2_i8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.experimental.vector.extract.nxv1i8.nxv4i8(<vscale x 4 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8mf2_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.experimental.vector.extract.nxv1i8.nxv4i8(<vscale x 4 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vlmul_trunc_v_i8mf2_i8mf8(vint8mf2_t op1) {
+  return vlmul_trunc_v_i8mf2_i8mf8(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i8mf2_i8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv4i8(<vscale x 4 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8mf2_i8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv4i8(<vscale x 4 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vlmul_trunc_v_i8mf2_i8mf4(vint8mf2_t op1) {
+  return vlmul_trunc_v_i8mf2_i8mf4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i8m1_i8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.experimental.vector.extract.nxv1i8.nxv8i8(<vscale x 8 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m1_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.experimental.vector.extract.nxv1i8.nxv8i8(<vscale x 8 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vlmul_trunc_v_i8m1_i8mf8(vint8m1_t op1) {
+  return vlmul_trunc_v_i8m1_i8mf8(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i8m1_i8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv8i8(<vscale x 8 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m1_i8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv8i8(<vscale x 8 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vlmul_trunc_v_i8m1_i8mf4(vint8m1_t op1) {
+  return vlmul_trunc_v_i8m1_i8mf4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i8m1_i8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.experimental.vector.extract.nxv4i8.nxv8i8(<vscale x 8 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m1_i8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.experimental.vector.extract.nxv4i8.nxv8i8(<vscale x 8 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vlmul_trunc_v_i8m1_i8mf2(vint8m1_t op1) {
+  return vlmul_trunc_v_i8m1_i8mf2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i8m2_i8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.experimental.vector.extract.nxv1i8.nxv16i8(<vscale x 16 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.experimental.vector.extract.nxv1i8.nxv16i8(<vscale x 16 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vlmul_trunc_v_i8m2_i8mf8(vint8m2_t op1) {
+  return vlmul_trunc_v_i8m2_i8mf8(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i8m2_i8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv16i8(<vscale x 16 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv16i8(<vscale x 16 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vlmul_trunc_v_i8m2_i8mf4(vint8m2_t op1) {
+  return vlmul_trunc_v_i8m2_i8mf4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i8m2_i8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.experimental.vector.extract.nxv4i8.nxv16i8(<vscale x 16 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.experimental.vector.extract.nxv4i8.nxv16i8(<vscale x 16 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vlmul_trunc_v_i8m2_i8mf2(vint8m2_t op1) {
+  return vlmul_trunc_v_i8m2_i8mf2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i8m2_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv16i8(<vscale x 16 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv16i8(<vscale x 16 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vlmul_trunc_v_i8m2_i8m1(vint8m2_t op1) {
+  return vlmul_trunc_v_i8m2_i8m1(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i8m4_i8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.experimental.vector.extract.nxv1i8.nxv32i8(<vscale x 32 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.experimental.vector.extract.nxv1i8.nxv32i8(<vscale x 32 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vlmul_trunc_v_i8m4_i8mf8(vint8m4_t op1) {
+  return vlmul_trunc_v_i8m4_i8mf8(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i8m4_i8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv32i8(<vscale x 32 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv32i8(<vscale x 32 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vlmul_trunc_v_i8m4_i8mf4(vint8m4_t op1) {
+  return vlmul_trunc_v_i8m4_i8mf4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i8m4_i8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.experimental.vector.extract.nxv4i8.nxv32i8(<vscale x 32 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.experimental.vector.extract.nxv4i8.nxv32i8(<vscale x 32 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vlmul_trunc_v_i8m4_i8mf2(vint8m4_t op1) {
+  return vlmul_trunc_v_i8m4_i8mf2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i8m4_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv32i8(<vscale x 32 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv32i8(<vscale x 32 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vlmul_trunc_v_i8m4_i8m1(vint8m4_t op1) {
+  return vlmul_trunc_v_i8m4_i8m1(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i8m4_i8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.extract.nxv16i8.nxv32i8(<vscale x 32 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.extract.nxv16i8.nxv32i8(<vscale x 32 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vlmul_trunc_v_i8m4_i8m2(vint8m4_t op1) {
+  return vlmul_trunc_v_i8m4_i8m2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i8m8_i8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.experimental.vector.extract.nxv1i8.nxv64i8(<vscale x 64 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.experimental.vector.extract.nxv1i8.nxv64i8(<vscale x 64 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vlmul_trunc_v_i8m8_i8mf8(vint8m8_t op1) {
+  return vlmul_trunc_v_i8m8_i8mf8(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i8m8_i8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv64i8(<vscale x 64 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv64i8(<vscale x 64 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vlmul_trunc_v_i8m8_i8mf4(vint8m8_t op1) {
+  return vlmul_trunc_v_i8m8_i8mf4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i8m8_i8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.experimental.vector.extract.nxv4i8.nxv64i8(<vscale x 64 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.experimental.vector.extract.nxv4i8.nxv64i8(<vscale x 64 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vlmul_trunc_v_i8m8_i8mf2(vint8m8_t op1) {
+  return vlmul_trunc_v_i8m8_i8mf2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i8m8_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv64i8(<vscale x 64 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv64i8(<vscale x 64 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vlmul_trunc_v_i8m8_i8m1(vint8m8_t op1) {
+  return vlmul_trunc_v_i8m8_i8m1(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i8m8_i8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vlmul_trunc_v_i8m8_i8m2(vint8m8_t op1) {
+  return vlmul_trunc_v_i8m8_i8m2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i8m8_i8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.extract.nxv32i8.nxv64i8(<vscale x 64 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.extract.nxv32i8.nxv64i8(<vscale x 64 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vlmul_trunc_v_i8m8_i8m4(vint8m8_t op1) {
+  return vlmul_trunc_v_i8m8_i8m4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i16mf2_i16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.experimental.vector.extract.nxv1i16.nxv2i16(<vscale x 2 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16mf2_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.experimental.vector.extract.nxv1i16.nxv2i16(<vscale x 2 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vlmul_trunc_v_i16mf2_i16mf4(vint16mf2_t op1) {
+  return vlmul_trunc_v_i16mf2_i16mf4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i16m1_i16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.experimental.vector.extract.nxv1i16.nxv4i16(<vscale x 4 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m1_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.experimental.vector.extract.nxv1i16.nxv4i16(<vscale x 4 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vlmul_trunc_v_i16m1_i16mf4(vint16m1_t op1) {
+  return vlmul_trunc_v_i16m1_i16mf4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i16m1_i16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.experimental.vector.extract.nxv2i16.nxv4i16(<vscale x 4 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m1_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.experimental.vector.extract.nxv2i16.nxv4i16(<vscale x 4 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vlmul_trunc_v_i16m1_i16mf2(vint16m1_t op1) {
+  return vlmul_trunc_v_i16m1_i16mf2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i16m2_i16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.experimental.vector.extract.nxv1i16.nxv8i16(<vscale x 8 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m2_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.experimental.vector.extract.nxv1i16.nxv8i16(<vscale x 8 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vlmul_trunc_v_i16m2_i16mf4(vint16m2_t op1) {
+  return vlmul_trunc_v_i16m2_i16mf4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i16m2_i16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.experimental.vector.extract.nxv2i16.nxv8i16(<vscale x 8 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m2_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.experimental.vector.extract.nxv2i16.nxv8i16(<vscale x 8 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vlmul_trunc_v_i16m2_i16mf2(vint16m2_t op1) {
+  return vlmul_trunc_v_i16m2_i16mf2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i16m2_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv8i16(<vscale x 8 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m2_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv8i16(<vscale x 8 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vlmul_trunc_v_i16m2_i16m1(vint16m2_t op1) {
+  return vlmul_trunc_v_i16m2_i16m1(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i16m4_i16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.experimental.vector.extract.nxv1i16.nxv16i16(<vscale x 16 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.experimental.vector.extract.nxv1i16.nxv16i16(<vscale x 16 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vlmul_trunc_v_i16m4_i16mf4(vint16m4_t op1) {
+  return vlmul_trunc_v_i16m4_i16mf4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i16m4_i16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.experimental.vector.extract.nxv2i16.nxv16i16(<vscale x 16 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.experimental.vector.extract.nxv2i16.nxv16i16(<vscale x 16 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vlmul_trunc_v_i16m4_i16mf2(vint16m4_t op1) {
+  return vlmul_trunc_v_i16m4_i16mf2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i16m4_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv16i16(<vscale x 16 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv16i16(<vscale x 16 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vlmul_trunc_v_i16m4_i16m1(vint16m4_t op1) {
+  return vlmul_trunc_v_i16m4_i16m1(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i16m4_i16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.extract.nxv8i16.nxv16i16(<vscale x 16 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.extract.nxv8i16.nxv16i16(<vscale x 16 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vlmul_trunc_v_i16m4_i16m2(vint16m4_t op1) {
+  return vlmul_trunc_v_i16m4_i16m2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i16m8_i16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.experimental.vector.extract.nxv1i16.nxv32i16(<vscale x 32 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.experimental.vector.extract.nxv1i16.nxv32i16(<vscale x 32 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vlmul_trunc_v_i16m8_i16mf4(vint16m8_t op1) {
+  return vlmul_trunc_v_i16m8_i16mf4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i16m8_i16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.experimental.vector.extract.nxv2i16.nxv32i16(<vscale x 32 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.experimental.vector.extract.nxv2i16.nxv32i16(<vscale x 32 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vlmul_trunc_v_i16m8_i16mf2(vint16m8_t op1) {
+  return vlmul_trunc_v_i16m8_i16mf2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i16m8_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv32i16(<vscale x 32 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv32i16(<vscale x 32 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vlmul_trunc_v_i16m8_i16m1(vint16m8_t op1) {
+  return vlmul_trunc_v_i16m8_i16m1(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i16m8_i16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vlmul_trunc_v_i16m8_i16m2(vint16m8_t op1) {
+  return vlmul_trunc_v_i16m8_i16m2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i16m8_i16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.extract.nxv16i16.nxv32i16(<vscale x 32 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.extract.nxv16i16.nxv32i16(<vscale x 32 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vlmul_trunc_v_i16m8_i16m4(vint16m8_t op1) {
+  return vlmul_trunc_v_i16m8_i16m4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i32m1_i32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.experimental.vector.extract.nxv1i32.nxv2i32(<vscale x 2 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m1_i32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.experimental.vector.extract.nxv1i32.nxv2i32(<vscale x 2 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vlmul_trunc_v_i32m1_i32mf2(vint32m1_t op1) {
+  return vlmul_trunc_v_i32m1_i32mf2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i32m2_i32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.experimental.vector.extract.nxv1i32.nxv4i32(<vscale x 4 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m2_i32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.experimental.vector.extract.nxv1i32.nxv4i32(<vscale x 4 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vlmul_trunc_v_i32m2_i32mf2(vint32m2_t op1) {
+  return vlmul_trunc_v_i32m2_i32mf2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i32m2_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv4i32(<vscale x 4 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m2_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv4i32(<vscale x 4 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vlmul_trunc_v_i32m2_i32m1(vint32m2_t op1) {
+  return vlmul_trunc_v_i32m2_i32m1(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i32m4_i32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.experimental.vector.extract.nxv1i32.nxv8i32(<vscale x 8 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m4_i32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.experimental.vector.extract.nxv1i32.nxv8i32(<vscale x 8 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vlmul_trunc_v_i32m4_i32mf2(vint32m4_t op1) {
+  return vlmul_trunc_v_i32m4_i32mf2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i32m4_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m4_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vlmul_trunc_v_i32m4_i32m1(vint32m4_t op1) {
+  return vlmul_trunc_v_i32m4_i32m1(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i32m4_i32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m4_i32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vlmul_trunc_v_i32m4_i32m2(vint32m4_t op1) {
+  return vlmul_trunc_v_i32m4_i32m2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i32m8_i32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.experimental.vector.extract.nxv1i32.nxv16i32(<vscale x 16 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.experimental.vector.extract.nxv1i32.nxv16i32(<vscale x 16 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vlmul_trunc_v_i32m8_i32mf2(vint32m8_t op1) {
+  return vlmul_trunc_v_i32m8_i32mf2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i32m8_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vlmul_trunc_v_i32m8_i32m1(vint32m8_t op1) {
+  return vlmul_trunc_v_i32m8_i32m1(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i32m8_i32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vlmul_trunc_v_i32m8_i32m2(vint32m8_t op1) {
+  return vlmul_trunc_v_i32m8_i32m2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i32m8_i32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.extract.nxv8i32.nxv16i32(<vscale x 16 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.extract.nxv8i32.nxv16i32(<vscale x 16 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vlmul_trunc_v_i32m8_i32m4(vint32m8_t op1) {
+  return vlmul_trunc_v_i32m8_i32m4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i64m2_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv2i64(<vscale x 2 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m2_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv2i64(<vscale x 2 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vlmul_trunc_v_i64m2_i64m1(vint64m2_t op1) {
+  return vlmul_trunc_v_i64m2_i64m1(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i64m4_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv4i64(<vscale x 4 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m4_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv4i64(<vscale x 4 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vlmul_trunc_v_i64m4_i64m1(vint64m4_t op1) {
+  return vlmul_trunc_v_i64m4_i64m1(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i64m4_i64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.extract.nxv2i64.nxv4i64(<vscale x 4 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m4_i64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.extract.nxv2i64.nxv4i64(<vscale x 4 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vlmul_trunc_v_i64m4_i64m2(vint64m4_t op1) {
+  return vlmul_trunc_v_i64m4_i64m2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i64m8_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv8i64(<vscale x 8 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m8_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv8i64(<vscale x 8 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vlmul_trunc_v_i64m8_i64m1(vint64m8_t op1) {
+  return vlmul_trunc_v_i64m8_i64m1(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i64m8_i64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m8_i64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vlmul_trunc_v_i64m8_i64m2(vint64m8_t op1) {
+  return vlmul_trunc_v_i64m8_i64m2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i64m8_i64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.extract.nxv4i64.nxv8i64(<vscale x 8 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m8_i64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.extract.nxv4i64.nxv8i64(<vscale x 8 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vlmul_trunc_v_i64m8_i64m4(vint64m8_t op1) {
+  return vlmul_trunc_v_i64m8_i64m4(op1);
+}
+
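+// The unsigned tests below expect the same integer extract intrinsics as the
+// signed ones: LLVM vector element types are signless, so the u8/u16/u32/u64
+// truncations reuse the nxv*i8/i16/i32/i64 forms and differ from the signed
+// tests only in the C-level argument and result types.
+//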
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u8mf4_u8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.experimental.vector.extract.nxv1i8.nxv2i8(<vscale x 2 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8mf4_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.experimental.vector.extract.nxv1i8.nxv2i8(<vscale x 2 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vlmul_trunc_v_u8mf4_u8mf8(vuint8mf4_t op1) {
+  return vlmul_trunc_v_u8mf4_u8mf8(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u8mf2_u8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.experimental.vector.extract.nxv1i8.nxv4i8(<vscale x 4 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8mf2_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.experimental.vector.extract.nxv1i8.nxv4i8(<vscale x 4 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vlmul_trunc_v_u8mf2_u8mf8(vuint8mf2_t op1) {
+  return vlmul_trunc_v_u8mf2_u8mf8(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u8mf2_u8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv4i8(<vscale x 4 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8mf2_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv4i8(<vscale x 4 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vlmul_trunc_v_u8mf2_u8mf4(vuint8mf2_t op1) {
+  return vlmul_trunc_v_u8mf2_u8mf4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u8m1_u8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.experimental.vector.extract.nxv1i8.nxv8i8(<vscale x 8 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m1_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.experimental.vector.extract.nxv1i8.nxv8i8(<vscale x 8 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vlmul_trunc_v_u8m1_u8mf8(vuint8m1_t op1) {
+  return vlmul_trunc_v_u8m1_u8mf8(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u8m1_u8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv8i8(<vscale x 8 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m1_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv8i8(<vscale x 8 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vlmul_trunc_v_u8m1_u8mf4(vuint8m1_t op1) {
+  return vlmul_trunc_v_u8m1_u8mf4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u8m1_u8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.experimental.vector.extract.nxv4i8.nxv8i8(<vscale x 8 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m1_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.experimental.vector.extract.nxv4i8.nxv8i8(<vscale x 8 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vlmul_trunc_v_u8m1_u8mf2(vuint8m1_t op1) {
+  return vlmul_trunc_v_u8m1_u8mf2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u8m2_u8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.experimental.vector.extract.nxv1i8.nxv16i8(<vscale x 16 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.experimental.vector.extract.nxv1i8.nxv16i8(<vscale x 16 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vlmul_trunc_v_u8m2_u8mf8(vuint8m2_t op1) {
+  return vlmul_trunc_v_u8m2_u8mf8(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u8m2_u8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv16i8(<vscale x 16 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv16i8(<vscale x 16 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vlmul_trunc_v_u8m2_u8mf4(vuint8m2_t op1) {
+  return vlmul_trunc_v_u8m2_u8mf4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u8m2_u8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.experimental.vector.extract.nxv4i8.nxv16i8(<vscale x 16 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.experimental.vector.extract.nxv4i8.nxv16i8(<vscale x 16 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vlmul_trunc_v_u8m2_u8mf2(vuint8m2_t op1) {
+  return vlmul_trunc_v_u8m2_u8mf2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u8m2_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv16i8(<vscale x 16 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv16i8(<vscale x 16 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vlmul_trunc_v_u8m2_u8m1(vuint8m2_t op1) {
+  return vlmul_trunc_v_u8m2_u8m1(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u8m4_u8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.experimental.vector.extract.nxv1i8.nxv32i8(<vscale x 32 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.experimental.vector.extract.nxv1i8.nxv32i8(<vscale x 32 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vlmul_trunc_v_u8m4_u8mf8(vuint8m4_t op1) {
+  return vlmul_trunc_v_u8m4_u8mf8(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u8m4_u8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv32i8(<vscale x 32 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv32i8(<vscale x 32 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vlmul_trunc_v_u8m4_u8mf4(vuint8m4_t op1) {
+  return vlmul_trunc_v_u8m4_u8mf4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u8m4_u8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.experimental.vector.extract.nxv4i8.nxv32i8(<vscale x 32 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.experimental.vector.extract.nxv4i8.nxv32i8(<vscale x 32 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vlmul_trunc_v_u8m4_u8mf2(vuint8m4_t op1) {
+  return vlmul_trunc_v_u8m4_u8mf2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u8m4_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv32i8(<vscale x 32 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv32i8(<vscale x 32 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vlmul_trunc_v_u8m4_u8m1(vuint8m4_t op1) {
+  return vlmul_trunc_v_u8m4_u8m1(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u8m4_u8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.extract.nxv16i8.nxv32i8(<vscale x 32 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.extract.nxv16i8.nxv32i8(<vscale x 32 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vlmul_trunc_v_u8m4_u8m2(vuint8m4_t op1) {
+  return vlmul_trunc_v_u8m4_u8m2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u8m8_u8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.experimental.vector.extract.nxv1i8.nxv64i8(<vscale x 64 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.experimental.vector.extract.nxv1i8.nxv64i8(<vscale x 64 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vlmul_trunc_v_u8m8_u8mf8(vuint8m8_t op1) {
+  return vlmul_trunc_v_u8m8_u8mf8(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u8m8_u8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv64i8(<vscale x 64 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv64i8(<vscale x 64 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vlmul_trunc_v_u8m8_u8mf4(vuint8m8_t op1) {
+  return vlmul_trunc_v_u8m8_u8mf4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u8m8_u8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.experimental.vector.extract.nxv4i8.nxv64i8(<vscale x 64 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.experimental.vector.extract.nxv4i8.nxv64i8(<vscale x 64 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vlmul_trunc_v_u8m8_u8mf2(vuint8m8_t op1) {
+  return vlmul_trunc_v_u8m8_u8mf2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u8m8_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv64i8(<vscale x 64 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv64i8(<vscale x 64 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vlmul_trunc_v_u8m8_u8m1(vuint8m8_t op1) {
+  return vlmul_trunc_v_u8m8_u8m1(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u8m8_u8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vlmul_trunc_v_u8m8_u8m2(vuint8m8_t op1) {
+  return vlmul_trunc_v_u8m8_u8m2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u8m8_u8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.extract.nxv32i8.nxv64i8(<vscale x 64 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.extract.nxv32i8.nxv64i8(<vscale x 64 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vlmul_trunc_v_u8m8_u8m4(vuint8m8_t op1) {
+  return vlmul_trunc_v_u8m8_u8m4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u16mf2_u16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.experimental.vector.extract.nxv1i16.nxv2i16(<vscale x 2 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16mf2_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.experimental.vector.extract.nxv1i16.nxv2i16(<vscale x 2 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vlmul_trunc_v_u16mf2_u16mf4(vuint16mf2_t op1) {
+  return vlmul_trunc_v_u16mf2_u16mf4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u16m1_u16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.experimental.vector.extract.nxv1i16.nxv4i16(<vscale x 4 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m1_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.experimental.vector.extract.nxv1i16.nxv4i16(<vscale x 4 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vlmul_trunc_v_u16m1_u16mf4(vuint16m1_t op1) {
+  return vlmul_trunc_v_u16m1_u16mf4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u16m1_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.experimental.vector.extract.nxv2i16.nxv4i16(<vscale x 4 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m1_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.experimental.vector.extract.nxv2i16.nxv4i16(<vscale x 4 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vlmul_trunc_v_u16m1_u16mf2(vuint16m1_t op1) {
+  return vlmul_trunc_v_u16m1_u16mf2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u16m2_u16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.experimental.vector.extract.nxv1i16.nxv8i16(<vscale x 8 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m2_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.experimental.vector.extract.nxv1i16.nxv8i16(<vscale x 8 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vlmul_trunc_v_u16m2_u16mf4(vuint16m2_t op1) {
+  return vlmul_trunc_v_u16m2_u16mf4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u16m2_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.experimental.vector.extract.nxv2i16.nxv8i16(<vscale x 8 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m2_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.experimental.vector.extract.nxv2i16.nxv8i16(<vscale x 8 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vlmul_trunc_v_u16m2_u16mf2(vuint16m2_t op1) {
+  return vlmul_trunc_v_u16m2_u16mf2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u16m2_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv8i16(<vscale x 8 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m2_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv8i16(<vscale x 8 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vlmul_trunc_v_u16m2_u16m1(vuint16m2_t op1) {
+  return vlmul_trunc_v_u16m2_u16m1(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u16m4_u16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.experimental.vector.extract.nxv1i16.nxv16i16(<vscale x 16 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.experimental.vector.extract.nxv1i16.nxv16i16(<vscale x 16 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vlmul_trunc_v_u16m4_u16mf4(vuint16m4_t op1) {
+  return vlmul_trunc_v_u16m4_u16mf4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u16m4_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.experimental.vector.extract.nxv2i16.nxv16i16(<vscale x 16 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.experimental.vector.extract.nxv2i16.nxv16i16(<vscale x 16 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vlmul_trunc_v_u16m4_u16mf2(vuint16m4_t op1) {
+  return vlmul_trunc_v_u16m4_u16mf2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u16m4_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv16i16(<vscale x 16 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv16i16(<vscale x 16 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vlmul_trunc_v_u16m4_u16m1(vuint16m4_t op1) {
+  return vlmul_trunc_v_u16m4_u16m1(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u16m4_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.extract.nxv8i16.nxv16i16(<vscale x 16 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.extract.nxv8i16.nxv16i16(<vscale x 16 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vlmul_trunc_v_u16m4_u16m2(vuint16m4_t op1) {
+  return vlmul_trunc_v_u16m4_u16m2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u16m8_u16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.experimental.vector.extract.nxv1i16.nxv32i16(<vscale x 32 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.experimental.vector.extract.nxv1i16.nxv32i16(<vscale x 32 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vlmul_trunc_v_u16m8_u16mf4(vuint16m8_t op1) {
+  return vlmul_trunc_v_u16m8_u16mf4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u16m8_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.experimental.vector.extract.nxv2i16.nxv32i16(<vscale x 32 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.experimental.vector.extract.nxv2i16.nxv32i16(<vscale x 32 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vlmul_trunc_v_u16m8_u16mf2(vuint16m8_t op1) {
+  return vlmul_trunc_v_u16m8_u16mf2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u16m8_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv32i16(<vscale x 32 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv32i16(<vscale x 32 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vlmul_trunc_v_u16m8_u16m1(vuint16m8_t op1) {
+  return vlmul_trunc_v_u16m8_u16m1(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u16m8_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vlmul_trunc_v_u16m8_u16m2(vuint16m8_t op1) {
+  return vlmul_trunc_v_u16m8_u16m2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u16m8_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.extract.nxv16i16.nxv32i16(<vscale x 32 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.extract.nxv16i16.nxv32i16(<vscale x 32 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vlmul_trunc_v_u16m8_u16m4(vuint16m8_t op1) {
+  return vlmul_trunc_v_u16m8_u16m4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u32m1_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.experimental.vector.extract.nxv1i32.nxv2i32(<vscale x 2 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m1_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.experimental.vector.extract.nxv1i32.nxv2i32(<vscale x 2 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vlmul_trunc_v_u32m1_u32mf2(vuint32m1_t op1) {
+  return vlmul_trunc_v_u32m1_u32mf2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u32m2_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.experimental.vector.extract.nxv1i32.nxv4i32(<vscale x 4 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m2_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.experimental.vector.extract.nxv1i32.nxv4i32(<vscale x 4 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vlmul_trunc_v_u32m2_u32mf2(vuint32m2_t op1) {
+  return vlmul_trunc_v_u32m2_u32mf2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u32m2_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv4i32(<vscale x 4 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m2_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv4i32(<vscale x 4 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vlmul_trunc_v_u32m2_u32m1(vuint32m2_t op1) {
+  return vlmul_trunc_v_u32m2_u32m1(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u32m4_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.experimental.vector.extract.nxv1i32.nxv8i32(<vscale x 8 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m4_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.experimental.vector.extract.nxv1i32.nxv8i32(<vscale x 8 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vlmul_trunc_v_u32m4_u32mf2(vuint32m4_t op1) {
+  return vlmul_trunc_v_u32m4_u32mf2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u32m4_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m4_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vlmul_trunc_v_u32m4_u32m1(vuint32m4_t op1) {
+  return vlmul_trunc_v_u32m4_u32m1(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u32m4_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m4_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vlmul_trunc_v_u32m4_u32m2(vuint32m4_t op1) {
+  return vlmul_trunc_v_u32m4_u32m2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u32m8_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.experimental.vector.extract.nxv1i32.nxv16i32(<vscale x 16 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.experimental.vector.extract.nxv1i32.nxv16i32(<vscale x 16 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vlmul_trunc_v_u32m8_u32mf2(vuint32m8_t op1) {
+  return vlmul_trunc_v_u32m8_u32mf2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u32m8_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vlmul_trunc_v_u32m8_u32m1(vuint32m8_t op1) {
+  return vlmul_trunc_v_u32m8_u32m1(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u32m8_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vlmul_trunc_v_u32m8_u32m2(vuint32m8_t op1) {
+  return vlmul_trunc_v_u32m8_u32m2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u32m8_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.extract.nxv8i32.nxv16i32(<vscale x 16 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.extract.nxv8i32.nxv16i32(<vscale x 16 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vlmul_trunc_v_u32m8_u32m4(vuint32m8_t op1) {
+  return vlmul_trunc_v_u32m8_u32m4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u64m2_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv2i64(<vscale x 2 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m2_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv2i64(<vscale x 2 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vlmul_trunc_v_u64m2_u64m1(vuint64m2_t op1) {
+  return vlmul_trunc_v_u64m2_u64m1(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u64m4_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv4i64(<vscale x 4 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m4_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv4i64(<vscale x 4 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vlmul_trunc_v_u64m4_u64m1(vuint64m4_t op1) {
+  return vlmul_trunc_v_u64m4_u64m1(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u64m4_u64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.extract.nxv2i64.nxv4i64(<vscale x 4 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m4_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.extract.nxv2i64.nxv4i64(<vscale x 4 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vlmul_trunc_v_u64m4_u64m2(vuint64m4_t op1) {
+  return vlmul_trunc_v_u64m4_u64m2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u64m8_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv8i64(<vscale x 8 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m8_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv8i64(<vscale x 8 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vlmul_trunc_v_u64m8_u64m1(vuint64m8_t op1) {
+  return vlmul_trunc_v_u64m8_u64m1(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u64m8_u64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m8_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vlmul_trunc_v_u64m8_u64m2(vuint64m8_t op1) {
+  return vlmul_trunc_v_u64m8_u64m2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u64m8_u64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.extract.nxv4i64.nxv8i64(<vscale x 8 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m8_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.extract.nxv4i64.nxv8i64(<vscale x 8 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vlmul_trunc_v_u64m8_u64m4(vuint64m8_t op1) {
+  return vlmul_trunc_v_u64m8_u64m4(op1);
+}
+
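+// The floating-point truncations below expect the same extract-at-index-0
+// pattern, just with float element types (nxv*f32 here); as in the integer
+// cases, only the low LMUL fraction of the source register group is kept.
+//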
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_f32m1_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.experimental.vector.extract.nxv1f32.nxv2f32(<vscale x 2 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m1_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.experimental.vector.extract.nxv1f32.nxv2f32(<vscale x 2 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vlmul_trunc_v_f32m1_f32mf2(vfloat32m1_t op1) {
+  return vlmul_trunc_v_f32m1_f32mf2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_f32m2_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.experimental.vector.extract.nxv1f32.nxv4f32(<vscale x 4 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m2_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.experimental.vector.extract.nxv1f32.nxv4f32(<vscale x 4 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vlmul_trunc_v_f32m2_f32mf2(vfloat32m2_t op1) {
+  return vlmul_trunc_v_f32m2_f32mf2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_f32m2_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.experimental.vector.extract.nxv2f32.nxv4f32(<vscale x 4 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m2_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.experimental.vector.extract.nxv2f32.nxv4f32(<vscale x 4 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vlmul_trunc_v_f32m2_f32m1(vfloat32m2_t op1) {
+  return vlmul_trunc_v_f32m2_f32m1(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_f32m4_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.experimental.vector.extract.nxv1f32.nxv8f32(<vscale x 8 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m4_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.experimental.vector.extract.nxv1f32.nxv8f32(<vscale x 8 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vlmul_trunc_v_f32m4_f32mf2(vfloat32m4_t op1) {
+  return vlmul_trunc_v_f32m4_f32mf2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_f32m4_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.experimental.vector.extract.nxv2f32.nxv8f32(<vscale x 8 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m4_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.experimental.vector.extract.nxv2f32.nxv8f32(<vscale x 8 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vlmul_trunc_v_f32m4_f32m1(vfloat32m4_t op1) {
+  return vlmul_trunc_v_f32m4_f32m1(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_f32m4_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.experimental.vector.extract.nxv4f32.nxv8f32(<vscale x 8 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m4_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.experimental.vector.extract.nxv4f32.nxv8f32(<vscale x 8 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vlmul_trunc_v_f32m4_f32m2(vfloat32m4_t op1) {
+  return vlmul_trunc_v_f32m4_f32m2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_f32m8_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.experimental.vector.extract.nxv1f32.nxv16f32(<vscale x 16 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.experimental.vector.extract.nxv1f32.nxv16f32(<vscale x 16 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vlmul_trunc_v_f32m8_f32mf2(vfloat32m8_t op1) {
+  return vlmul_trunc_v_f32m8_f32mf2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_f32m8_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.experimental.vector.extract.nxv2f32.nxv16f32(<vscale x 16 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.experimental.vector.extract.nxv2f32.nxv16f32(<vscale x 16 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vlmul_trunc_v_f32m8_f32m1(vfloat32m8_t op1) {
+  return vlmul_trunc_v_f32m8_f32m1(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_f32m8_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.experimental.vector.extract.nxv4f32.nxv16f32(<vscale x 16 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.experimental.vector.extract.nxv4f32.nxv16f32(<vscale x 16 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vlmul_trunc_v_f32m8_f32m2(vfloat32m8_t op1) {
+  return vlmul_trunc_v_f32m8_f32m2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_f32m8_f32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.experimental.vector.extract.nxv8f32.nxv16f32(<vscale x 16 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.experimental.vector.extract.nxv8f32.nxv16f32(<vscale x 16 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vlmul_trunc_v_f32m8_f32m4(vfloat32m8_t op1) {
+  return vlmul_trunc_v_f32m8_f32m4(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_f64m2_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.experimental.vector.extract.nxv1f64.nxv2f64(<vscale x 2 x double> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m2_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.experimental.vector.extract.nxv1f64.nxv2f64(<vscale x 2 x double> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vlmul_trunc_v_f64m2_f64m1(vfloat64m2_t op1) {
+  return vlmul_trunc_v_f64m2_f64m1(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_f64m4_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.experimental.vector.extract.nxv1f64.nxv4f64(<vscale x 4 x double> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m4_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.experimental.vector.extract.nxv1f64.nxv4f64(<vscale x 4 x double> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vlmul_trunc_v_f64m4_f64m1(vfloat64m4_t op1) {
+  return vlmul_trunc_v_f64m4_f64m1(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_f64m4_f64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.extract.nxv2f64.nxv4f64(<vscale x 4 x double> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m4_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.extract.nxv2f64.nxv4f64(<vscale x 4 x double> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vlmul_trunc_v_f64m4_f64m2(vfloat64m4_t op1) {
+  return vlmul_trunc_v_f64m4_f64m2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_f64m8_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.experimental.vector.extract.nxv1f64.nxv8f64(<vscale x 8 x double> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m8_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.experimental.vector.extract.nxv1f64.nxv8f64(<vscale x 8 x double> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vlmul_trunc_v_f64m8_f64m1(vfloat64m8_t op1) {
+  return vlmul_trunc_v_f64m8_f64m1(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_f64m8_f64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.extract.nxv2f64.nxv8f64(<vscale x 8 x double> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m8_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.extract.nxv2f64.nxv8f64(<vscale x 8 x double> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vlmul_trunc_v_f64m8_f64m2(vfloat64m8_t op1) {
+  return vlmul_trunc_v_f64m8_f64m2(op1);
+}
+
+// CHECK-RV32-LABEL: @test_vlmul_trunc_v_f64m8_f64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.experimental.vector.extract.nxv4f64.nxv8f64(<vscale x 8 x double> [[OP1:%.*]], i64 0)
+// CHECK-RV32-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m8_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.experimental.vector.extract.nxv4f64.nxv8f64(<vscale x 8 x double> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vlmul_trunc_v_f64m8_f64m4(vfloat64m8_t op1) {
+  return vlmul_trunc_v_f64m8_f64m4(op1);
+}
diff --git a/test/CodeGen/RISCV/rvv-intrinsics/vreinterpret.c b/test/CodeGen/RISCV/rvv-intrinsics/vreinterpret.c
new file mode 100644
index 0000000..e3142ab
--- /dev/null
+++ b/test/CodeGen/RISCV/rvv-intrinsics/vreinterpret.c
@@ -0,0 +1,2608 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
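+// Reader's note (not part of the autogenerated checks): reinterprets
+// between signed and unsigned vectors of the same SEW and LMUL are
+// no-ops after mem2reg, because both C types lower to the same scalable
+// LLVM vector type, so those checks only expect a plain `ret`.
+// Reinterprets that involve a floating-point type, or that change SEW,
+// are expected to lower to an IR `bitcast` instead.
+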
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i8mf8_u8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[SRC:%.*]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i8mf8_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[SRC:%.*]]
+//
+vuint8mf8_t test_vreinterpret_v_i8mf8_u8mf8(vint8mf8_t src) {
+  return vreinterpret_v_i8mf8_u8mf8(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i8mf4_u8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[SRC:%.*]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i8mf4_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[SRC:%.*]]
+//
+vuint8mf4_t test_vreinterpret_v_i8mf4_u8mf4(vint8mf4_t src) {
+  return vreinterpret_v_i8mf4_u8mf4(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i8mf2_u8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[SRC:%.*]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i8mf2_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[SRC:%.*]]
+//
+vuint8mf2_t test_vreinterpret_v_i8mf2_u8mf2(vint8mf2_t src) {
+  return vreinterpret_v_i8mf2_u8mf2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i8m1_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[SRC:%.*]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m1_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[SRC:%.*]]
+//
+vuint8m1_t test_vreinterpret_v_i8m1_u8m1(vint8m1_t src) {
+  return vreinterpret_v_i8m1_u8m1(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i8m2_u8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[SRC:%.*]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m2_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[SRC:%.*]]
+//
+vuint8m2_t test_vreinterpret_v_i8m2_u8m2(vint8m2_t src) {
+  return vreinterpret_v_i8m2_u8m2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i8m4_u8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[SRC:%.*]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m4_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[SRC:%.*]]
+//
+vuint8m4_t test_vreinterpret_v_i8m4_u8m4(vint8m4_t src) {
+  return vreinterpret_v_i8m4_u8m4(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i8m8_u8m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[SRC:%.*]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m8_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[SRC:%.*]]
+//
+vuint8m8_t test_vreinterpret_v_i8m8_u8m8(vint8m8_t src) {
+  return vreinterpret_v_i8m8_u8m8(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u8mf8_i8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[SRC:%.*]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u8mf8_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[SRC:%.*]]
+//
+vint8mf8_t test_vreinterpret_v_u8mf8_i8mf8(vuint8mf8_t src) {
+  return vreinterpret_v_u8mf8_i8mf8(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u8mf4_i8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[SRC:%.*]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u8mf4_i8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[SRC:%.*]]
+//
+vint8mf4_t test_vreinterpret_v_u8mf4_i8mf4(vuint8mf4_t src) {
+  return vreinterpret_v_u8mf4_i8mf4(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u8mf2_i8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[SRC:%.*]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u8mf2_i8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[SRC:%.*]]
+//
+vint8mf2_t test_vreinterpret_v_u8mf2_i8mf2(vuint8mf2_t src) {
+  return vreinterpret_v_u8mf2_i8mf2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u8m1_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[SRC:%.*]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m1_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[SRC:%.*]]
+//
+vint8m1_t test_vreinterpret_v_u8m1_i8m1(vuint8m1_t src) {
+  return vreinterpret_v_u8m1_i8m1(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u8m2_i8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[SRC:%.*]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m2_i8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[SRC:%.*]]
+//
+vint8m2_t test_vreinterpret_v_u8m2_i8m2(vuint8m2_t src) {
+  return vreinterpret_v_u8m2_i8m2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u8m4_i8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[SRC:%.*]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m4_i8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[SRC:%.*]]
+//
+vint8m4_t test_vreinterpret_v_u8m4_i8m4(vuint8m4_t src) {
+  return vreinterpret_v_u8m4_i8m4(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u8m8_i8m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[SRC:%.*]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m8_i8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[SRC:%.*]]
+//
+vint8m8_t test_vreinterpret_v_u8m8_i8m8(vuint8m8_t src) {
+  return vreinterpret_v_u8m8_i8m8(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i16mf4_u16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[SRC:%.*]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16mf4_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[SRC:%.*]]
+//
+vuint16mf4_t test_vreinterpret_v_i16mf4_u16mf4(vint16mf4_t src) {
+  return vreinterpret_v_i16mf4_u16mf4(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i16mf2_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[SRC:%.*]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16mf2_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[SRC:%.*]]
+//
+vuint16mf2_t test_vreinterpret_v_i16mf2_u16mf2(vint16mf2_t src) {
+  return vreinterpret_v_i16mf2_u16mf2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i16m1_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[SRC:%.*]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m1_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[SRC:%.*]]
+//
+vuint16m1_t test_vreinterpret_v_i16m1_u16m1(vint16m1_t src) {
+  return vreinterpret_v_i16m1_u16m1(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i16m2_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[SRC:%.*]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m2_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[SRC:%.*]]
+//
+vuint16m2_t test_vreinterpret_v_i16m2_u16m2(vint16m2_t src) {
+  return vreinterpret_v_i16m2_u16m2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i16m4_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[SRC:%.*]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m4_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[SRC:%.*]]
+//
+vuint16m4_t test_vreinterpret_v_i16m4_u16m4(vint16m4_t src) {
+  return vreinterpret_v_i16m4_u16m4(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i16m8_u16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[SRC:%.*]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m8_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[SRC:%.*]]
+//
+vuint16m8_t test_vreinterpret_v_i16m8_u16m8(vint16m8_t src) {
+  return vreinterpret_v_i16m8_u16m8(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u16mf4_i16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[SRC:%.*]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16mf4_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[SRC:%.*]]
+//
+vint16mf4_t test_vreinterpret_v_u16mf4_i16mf4(vuint16mf4_t src) {
+  return vreinterpret_v_u16mf4_i16mf4(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u16mf2_i16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[SRC:%.*]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16mf2_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[SRC:%.*]]
+//
+vint16mf2_t test_vreinterpret_v_u16mf2_i16mf2(vuint16mf2_t src) {
+  return vreinterpret_v_u16mf2_i16mf2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u16m1_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[SRC:%.*]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m1_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[SRC:%.*]]
+//
+vint16m1_t test_vreinterpret_v_u16m1_i16m1(vuint16m1_t src) {
+  return vreinterpret_v_u16m1_i16m1(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u16m2_i16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[SRC:%.*]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m2_i16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[SRC:%.*]]
+//
+vint16m2_t test_vreinterpret_v_u16m2_i16m2(vuint16m2_t src) {
+  return vreinterpret_v_u16m2_i16m2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u16m4_i16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[SRC:%.*]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m4_i16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[SRC:%.*]]
+//
+vint16m4_t test_vreinterpret_v_u16m4_i16m4(vuint16m4_t src) {
+  return vreinterpret_v_u16m4_i16m4(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u16m8_i16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[SRC:%.*]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m8_i16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[SRC:%.*]]
+//
+vint16m8_t test_vreinterpret_v_u16m8_i16m8(vuint16m8_t src) {
+  return vreinterpret_v_u16m8_i16m8(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i32mf2_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[SRC:%.*]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32mf2_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[SRC:%.*]]
+//
+vuint32mf2_t test_vreinterpret_v_i32mf2_u32mf2(vint32mf2_t src) {
+  return vreinterpret_v_i32mf2_u32mf2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i32m1_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[SRC:%.*]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m1_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[SRC:%.*]]
+//
+vuint32m1_t test_vreinterpret_v_i32m1_u32m1(vint32m1_t src) {
+  return vreinterpret_v_i32m1_u32m1(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i32m2_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[SRC:%.*]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m2_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[SRC:%.*]]
+//
+vuint32m2_t test_vreinterpret_v_i32m2_u32m2(vint32m2_t src) {
+  return vreinterpret_v_i32m2_u32m2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i32m4_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[SRC:%.*]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m4_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[SRC:%.*]]
+//
+vuint32m4_t test_vreinterpret_v_i32m4_u32m4(vint32m4_t src) {
+  return vreinterpret_v_i32m4_u32m4(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i32m8_u32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[SRC:%.*]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m8_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[SRC:%.*]]
+//
+vuint32m8_t test_vreinterpret_v_i32m8_u32m8(vint32m8_t src) {
+  return vreinterpret_v_i32m8_u32m8(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u32mf2_i32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[SRC:%.*]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32mf2_i32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[SRC:%.*]]
+//
+vint32mf2_t test_vreinterpret_v_u32mf2_i32mf2(vuint32mf2_t src) {
+  return vreinterpret_v_u32mf2_i32mf2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u32m1_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[SRC:%.*]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m1_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[SRC:%.*]]
+//
+vint32m1_t test_vreinterpret_v_u32m1_i32m1(vuint32m1_t src) {
+  return vreinterpret_v_u32m1_i32m1(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u32m2_i32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[SRC:%.*]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m2_i32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[SRC:%.*]]
+//
+vint32m2_t test_vreinterpret_v_u32m2_i32m2(vuint32m2_t src) {
+  return vreinterpret_v_u32m2_i32m2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u32m4_i32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[SRC:%.*]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m4_i32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[SRC:%.*]]
+//
+vint32m4_t test_vreinterpret_v_u32m4_i32m4(vuint32m4_t src) {
+  return vreinterpret_v_u32m4_i32m4(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u32m8_i32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[SRC:%.*]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m8_i32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[SRC:%.*]]
+//
+vint32m8_t test_vreinterpret_v_u32m8_i32m8(vuint32m8_t src) {
+  return vreinterpret_v_u32m8_i32m8(src);
+}
+
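+// Reinterprets between floating-point and integer vectors are not
+// no-ops at the IR level: as the checks below show, they lower to a
+// `bitcast` between scalable vector types of the same shape.
+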
+// CHECK-RV32-LABEL: @test_vreinterpret_v_f32mf2_i32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x float> [[SRC:%.*]] to <vscale x 1 x i32>
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f32mf2_i32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x float> [[SRC:%.*]] to <vscale x 1 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vreinterpret_v_f32mf2_i32mf2(vfloat32mf2_t src) {
+  return vreinterpret_v_f32mf2_i32mf2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_f32m1_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x float> [[SRC:%.*]] to <vscale x 2 x i32>
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f32m1_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x float> [[SRC:%.*]] to <vscale x 2 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vreinterpret_v_f32m1_i32m1(vfloat32m1_t src) {
+  return vreinterpret_v_f32m1_i32m1(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_f32m2_i32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x float> [[SRC:%.*]] to <vscale x 4 x i32>
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f32m2_i32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x float> [[SRC:%.*]] to <vscale x 4 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vreinterpret_v_f32m2_i32m2(vfloat32m2_t src) {
+  return vreinterpret_v_f32m2_i32m2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_f32m4_i32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x float> [[SRC:%.*]] to <vscale x 8 x i32>
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f32m4_i32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x float> [[SRC:%.*]] to <vscale x 8 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vreinterpret_v_f32m4_i32m4(vfloat32m4_t src) {
+  return vreinterpret_v_f32m4_i32m4(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_f32m8_i32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x float> [[SRC:%.*]] to <vscale x 16 x i32>
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f32m8_i32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x float> [[SRC:%.*]] to <vscale x 16 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vreinterpret_v_f32m8_i32m8(vfloat32m8_t src) {
+  return vreinterpret_v_f32m8_i32m8(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_f32mf2_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x float> [[SRC:%.*]] to <vscale x 1 x i32>
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f32mf2_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x float> [[SRC:%.*]] to <vscale x 1 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vreinterpret_v_f32mf2_u32mf2(vfloat32mf2_t src) {
+  return vreinterpret_v_f32mf2_u32mf2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_f32m1_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x float> [[SRC:%.*]] to <vscale x 2 x i32>
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f32m1_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x float> [[SRC:%.*]] to <vscale x 2 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vreinterpret_v_f32m1_u32m1(vfloat32m1_t src) {
+  return vreinterpret_v_f32m1_u32m1(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_f32m2_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x float> [[SRC:%.*]] to <vscale x 4 x i32>
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f32m2_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x float> [[SRC:%.*]] to <vscale x 4 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vreinterpret_v_f32m2_u32m2(vfloat32m2_t src) {
+  return vreinterpret_v_f32m2_u32m2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_f32m4_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x float> [[SRC:%.*]] to <vscale x 8 x i32>
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f32m4_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x float> [[SRC:%.*]] to <vscale x 8 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vreinterpret_v_f32m4_u32m4(vfloat32m4_t src) {
+  return vreinterpret_v_f32m4_u32m4(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_f32m8_u32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x float> [[SRC:%.*]] to <vscale x 16 x i32>
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f32m8_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x float> [[SRC:%.*]] to <vscale x 16 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vreinterpret_v_f32m8_u32m8(vfloat32m8_t src) {
+  return vreinterpret_v_f32m8_u32m8(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i32mf2_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i32> [[SRC:%.*]] to <vscale x 1 x float>
+// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32mf2_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i32> [[SRC:%.*]] to <vscale x 1 x float>
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vreinterpret_v_i32mf2_f32mf2(vint32mf2_t src) {
+  return vreinterpret_v_i32mf2_f32mf2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i32m1_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i32> [[SRC:%.*]] to <vscale x 2 x float>
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m1_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i32> [[SRC:%.*]] to <vscale x 2 x float>
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vreinterpret_v_i32m1_f32m1(vint32m1_t src) {
+  return vreinterpret_v_i32m1_f32m1(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i32m2_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i32> [[SRC:%.*]] to <vscale x 4 x float>
+// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m2_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i32> [[SRC:%.*]] to <vscale x 4 x float>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vreinterpret_v_i32m2_f32m2(vint32m2_t src) {
+  return vreinterpret_v_i32m2_f32m2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i32m4_f32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i32> [[SRC:%.*]] to <vscale x 8 x float>
+// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m4_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i32> [[SRC:%.*]] to <vscale x 8 x float>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vreinterpret_v_i32m4_f32m4(vint32m4_t src) {
+  return vreinterpret_v_i32m4_f32m4(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i32m8_f32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i32> [[SRC:%.*]] to <vscale x 16 x float>
+// CHECK-RV32-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m8_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i32> [[SRC:%.*]] to <vscale x 16 x float>
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vreinterpret_v_i32m8_f32m8(vint32m8_t src) {
+  return vreinterpret_v_i32m8_f32m8(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u32mf2_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i32> [[SRC:%.*]] to <vscale x 1 x float>
+// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32mf2_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i32> [[SRC:%.*]] to <vscale x 1 x float>
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vreinterpret_v_u32mf2_f32mf2(vuint32mf2_t src) {
+  return vreinterpret_v_u32mf2_f32mf2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u32m1_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i32> [[SRC:%.*]] to <vscale x 2 x float>
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m1_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i32> [[SRC:%.*]] to <vscale x 2 x float>
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vreinterpret_v_u32m1_f32m1(vuint32m1_t src) {
+  return vreinterpret_v_u32m1_f32m1(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u32m2_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i32> [[SRC:%.*]] to <vscale x 4 x float>
+// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m2_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i32> [[SRC:%.*]] to <vscale x 4 x float>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vreinterpret_v_u32m2_f32m2(vuint32m2_t src) {
+  return vreinterpret_v_u32m2_f32m2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u32m4_f32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i32> [[SRC:%.*]] to <vscale x 8 x float>
+// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m4_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i32> [[SRC:%.*]] to <vscale x 8 x float>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vreinterpret_v_u32m4_f32m4(vuint32m4_t src) {
+  return vreinterpret_v_u32m4_f32m4(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u32m8_f32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i32> [[SRC:%.*]] to <vscale x 16 x float>
+// CHECK-RV32-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m8_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i32> [[SRC:%.*]] to <vscale x 16 x float>
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vreinterpret_v_u32m8_f32m8(vuint32m8_t src) {
+  return vreinterpret_v_u32m8_f32m8(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i64m1_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[SRC:%.*]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m1_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[SRC:%.*]]
+//
+vuint64m1_t test_vreinterpret_v_i64m1_u64m1(vint64m1_t src) {
+  return vreinterpret_v_i64m1_u64m1(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i64m2_u64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[SRC:%.*]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m2_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[SRC:%.*]]
+//
+vuint64m2_t test_vreinterpret_v_i64m2_u64m2(vint64m2_t src) {
+  return vreinterpret_v_i64m2_u64m2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i64m4_u64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[SRC:%.*]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m4_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[SRC:%.*]]
+//
+vuint64m4_t test_vreinterpret_v_i64m4_u64m4(vint64m4_t src) {
+  return vreinterpret_v_i64m4_u64m4(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i64m8_u64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[SRC:%.*]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m8_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[SRC:%.*]]
+//
+vuint64m8_t test_vreinterpret_v_i64m8_u64m8(vint64m8_t src) {
+  return vreinterpret_v_i64m8_u64m8(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u64m1_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[SRC:%.*]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m1_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[SRC:%.*]]
+//
+vint64m1_t test_vreinterpret_v_u64m1_i64m1(vuint64m1_t src) {
+  return vreinterpret_v_u64m1_i64m1(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u64m2_i64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[SRC:%.*]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m2_i64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[SRC:%.*]]
+//
+vint64m2_t test_vreinterpret_v_u64m2_i64m2(vuint64m2_t src) {
+  return vreinterpret_v_u64m2_i64m2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u64m4_i64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[SRC:%.*]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m4_i64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[SRC:%.*]]
+//
+vint64m4_t test_vreinterpret_v_u64m4_i64m4(vuint64m4_t src) {
+  return vreinterpret_v_u64m4_i64m4(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u64m8_i64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[SRC:%.*]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m8_i64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[SRC:%.*]]
+//
+vint64m8_t test_vreinterpret_v_u64m8_i64m8(vuint64m8_t src) {
+  return vreinterpret_v_u64m8_i64m8(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_f64m1_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x double> [[SRC:%.*]] to <vscale x 1 x i64>
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f64m1_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x double> [[SRC:%.*]] to <vscale x 1 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vreinterpret_v_f64m1_i64m1(vfloat64m1_t src) {
+  return vreinterpret_v_f64m1_i64m1(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_f64m2_i64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x double> [[SRC:%.*]] to <vscale x 2 x i64>
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f64m2_i64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x double> [[SRC:%.*]] to <vscale x 2 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vreinterpret_v_f64m2_i64m2(vfloat64m2_t src) {
+  return vreinterpret_v_f64m2_i64m2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_f64m4_i64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x double> [[SRC:%.*]] to <vscale x 4 x i64>
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f64m4_i64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x double> [[SRC:%.*]] to <vscale x 4 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vreinterpret_v_f64m4_i64m4(vfloat64m4_t src) {
+  return vreinterpret_v_f64m4_i64m4(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_f64m8_i64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x double> [[SRC:%.*]] to <vscale x 8 x i64>
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f64m8_i64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x double> [[SRC:%.*]] to <vscale x 8 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vreinterpret_v_f64m8_i64m8(vfloat64m8_t src) {
+  return vreinterpret_v_f64m8_i64m8(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_f64m1_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x double> [[SRC:%.*]] to <vscale x 1 x i64>
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f64m1_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x double> [[SRC:%.*]] to <vscale x 1 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vreinterpret_v_f64m1_u64m1(vfloat64m1_t src) {
+  return vreinterpret_v_f64m1_u64m1(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_f64m2_u64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x double> [[SRC:%.*]] to <vscale x 2 x i64>
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f64m2_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x double> [[SRC:%.*]] to <vscale x 2 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vreinterpret_v_f64m2_u64m2(vfloat64m2_t src) {
+  return vreinterpret_v_f64m2_u64m2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_f64m4_u64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x double> [[SRC:%.*]] to <vscale x 4 x i64>
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f64m4_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x double> [[SRC:%.*]] to <vscale x 4 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vreinterpret_v_f64m4_u64m4(vfloat64m4_t src) {
+  return vreinterpret_v_f64m4_u64m4(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_f64m8_u64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x double> [[SRC:%.*]] to <vscale x 8 x i64>
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f64m8_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x double> [[SRC:%.*]] to <vscale x 8 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vreinterpret_v_f64m8_u64m8(vfloat64m8_t src) {
+  return vreinterpret_v_f64m8_u64m8(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i64m1_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i64> [[SRC:%.*]] to <vscale x 1 x double>
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m1_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i64> [[SRC:%.*]] to <vscale x 1 x double>
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vreinterpret_v_i64m1_f64m1(vint64m1_t src) {
+  return vreinterpret_v_i64m1_f64m1(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i64m2_f64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i64> [[SRC:%.*]] to <vscale x 2 x double>
+// CHECK-RV32-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m2_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i64> [[SRC:%.*]] to <vscale x 2 x double>
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vreinterpret_v_i64m2_f64m2(vint64m2_t src) {
+  return vreinterpret_v_i64m2_f64m2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i64m4_f64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i64> [[SRC:%.*]] to <vscale x 4 x double>
+// CHECK-RV32-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m4_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i64> [[SRC:%.*]] to <vscale x 4 x double>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vreinterpret_v_i64m4_f64m4(vint64m4_t src) {
+  return vreinterpret_v_i64m4_f64m4(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i64m8_f64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i64> [[SRC:%.*]] to <vscale x 8 x double>
+// CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m8_f64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i64> [[SRC:%.*]] to <vscale x 8 x double>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vreinterpret_v_i64m8_f64m8(vint64m8_t src) {
+  return vreinterpret_v_i64m8_f64m8(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u64m1_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i64> [[SRC:%.*]] to <vscale x 1 x double>
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m1_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i64> [[SRC:%.*]] to <vscale x 1 x double>
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vreinterpret_v_u64m1_f64m1(vuint64m1_t src) {
+  return vreinterpret_v_u64m1_f64m1(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u64m2_f64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i64> [[SRC:%.*]] to <vscale x 2 x double>
+// CHECK-RV32-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m2_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i64> [[SRC:%.*]] to <vscale x 2 x double>
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vreinterpret_v_u64m2_f64m2(vuint64m2_t src) {
+  return vreinterpret_v_u64m2_f64m2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u64m4_f64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i64> [[SRC:%.*]] to <vscale x 4 x double>
+// CHECK-RV32-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m4_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i64> [[SRC:%.*]] to <vscale x 4 x double>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vreinterpret_v_u64m4_f64m4(vuint64m4_t src) {
+  return vreinterpret_v_u64m4_f64m4(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u64m8_f64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i64> [[SRC:%.*]] to <vscale x 8 x double>
+// CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m8_f64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i64> [[SRC:%.*]] to <vscale x 8 x double>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vreinterpret_v_u64m8_f64m8(vuint64m8_t src) {
+  return vreinterpret_v_u64m8_f64m8(src);
+}
+
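+// From here on the reinterprets change SEW while keeping LMUL. The
+// total bit width is unchanged, so the expected lowering is still a
+// single `bitcast`, now between different element counts (e.g.
+// <vscale x 2 x i8> to <vscale x 1 x i16>).
+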
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i8mf4_i16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i8> [[SRC:%.*]] to <vscale x 1 x i16>
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i8mf4_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i8> [[SRC:%.*]] to <vscale x 1 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vreinterpret_v_i8mf4_i16mf4(vint8mf4_t src) {
+  return vreinterpret_v_i8mf4_i16mf4(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i8mf2_i16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i8> [[SRC:%.*]] to <vscale x 2 x i16>
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i8mf2_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i8> [[SRC:%.*]] to <vscale x 2 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vreinterpret_v_i8mf2_i16mf2(vint8mf2_t src) {
+  return vreinterpret_v_i8mf2_i16mf2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i8m1_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i8> [[SRC:%.*]] to <vscale x 4 x i16>
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m1_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i8> [[SRC:%.*]] to <vscale x 4 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vreinterpret_v_i8m1_i16m1(vint8m1_t src) {
+  return vreinterpret_v_i8m1_i16m1(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i8m2_i16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i8> [[SRC:%.*]] to <vscale x 8 x i16>
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m2_i16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i8> [[SRC:%.*]] to <vscale x 8 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vreinterpret_v_i8m2_i16m2(vint8m2_t src) {
+  return vreinterpret_v_i8m2_i16m2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i8m4_i16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 32 x i8> [[SRC:%.*]] to <vscale x 16 x i16>
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m4_i16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 32 x i8> [[SRC:%.*]] to <vscale x 16 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vreinterpret_v_i8m4_i16m4(vint8m4_t src) {
+  return vreinterpret_v_i8m4_i16m4(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i8m8_i16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 64 x i8> [[SRC:%.*]] to <vscale x 32 x i16>
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m8_i16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 64 x i8> [[SRC:%.*]] to <vscale x 32 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vreinterpret_v_i8m8_i16m8(vint8m8_t src) {
+  return vreinterpret_v_i8m8_i16m8(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u8mf4_u16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i8> [[SRC:%.*]] to <vscale x 1 x i16>
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u8mf4_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i8> [[SRC:%.*]] to <vscale x 1 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vreinterpret_v_u8mf4_u16mf4(vuint8mf4_t src) {
+  return vreinterpret_v_u8mf4_u16mf4(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u8mf2_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i8> [[SRC:%.*]] to <vscale x 2 x i16>
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u8mf2_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i8> [[SRC:%.*]] to <vscale x 2 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vreinterpret_v_u8mf2_u16mf2(vuint8mf2_t src) {
+  return vreinterpret_v_u8mf2_u16mf2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u8m1_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i8> [[SRC:%.*]] to <vscale x 4 x i16>
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m1_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i8> [[SRC:%.*]] to <vscale x 4 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vreinterpret_v_u8m1_u16m1(vuint8m1_t src) {
+  return vreinterpret_v_u8m1_u16m1(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u8m2_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i8> [[SRC:%.*]] to <vscale x 8 x i16>
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m2_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i8> [[SRC:%.*]] to <vscale x 8 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vreinterpret_v_u8m2_u16m2(vuint8m2_t src) {
+  return vreinterpret_v_u8m2_u16m2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u8m4_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 32 x i8> [[SRC:%.*]] to <vscale x 16 x i16>
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m4_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 32 x i8> [[SRC:%.*]] to <vscale x 16 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vreinterpret_v_u8m4_u16m4(vuint8m4_t src) {
+  return vreinterpret_v_u8m4_u16m4(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u8m8_u16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 64 x i8> [[SRC:%.*]] to <vscale x 32 x i16>
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m8_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 64 x i8> [[SRC:%.*]] to <vscale x 32 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vreinterpret_v_u8m8_u16m8(vuint8m8_t src) {
+  return vreinterpret_v_u8m8_u16m8(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i8mf2_i32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i8> [[SRC:%.*]] to <vscale x 1 x i32>
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i8mf2_i32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i8> [[SRC:%.*]] to <vscale x 1 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vreinterpret_v_i8mf2_i32mf2(vint8mf2_t src) {
+  return vreinterpret_v_i8mf2_i32mf2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i8m1_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i8> [[SRC:%.*]] to <vscale x 2 x i32>
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m1_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i8> [[SRC:%.*]] to <vscale x 2 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vreinterpret_v_i8m1_i32m1(vint8m1_t src) {
+  return vreinterpret_v_i8m1_i32m1(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i8m2_i32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i8> [[SRC:%.*]] to <vscale x 4 x i32>
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m2_i32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i8> [[SRC:%.*]] to <vscale x 4 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vreinterpret_v_i8m2_i32m2(vint8m2_t src) {
+  return vreinterpret_v_i8m2_i32m2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i8m4_i32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 32 x i8> [[SRC:%.*]] to <vscale x 8 x i32>
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m4_i32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 32 x i8> [[SRC:%.*]] to <vscale x 8 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vreinterpret_v_i8m4_i32m4(vint8m4_t src) {
+  return vreinterpret_v_i8m4_i32m4(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i8m8_i32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 64 x i8> [[SRC:%.*]] to <vscale x 16 x i32>
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m8_i32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 64 x i8> [[SRC:%.*]] to <vscale x 16 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vreinterpret_v_i8m8_i32m8(vint8m8_t src) {
+  return vreinterpret_v_i8m8_i32m8(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u8mf2_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i8> [[SRC:%.*]] to <vscale x 1 x i32>
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u8mf2_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i8> [[SRC:%.*]] to <vscale x 1 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vreinterpret_v_u8mf2_u32mf2(vuint8mf2_t src) {
+  return vreinterpret_v_u8mf2_u32mf2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u8m1_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i8> [[SRC:%.*]] to <vscale x 2 x i32>
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m1_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i8> [[SRC:%.*]] to <vscale x 2 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vreinterpret_v_u8m1_u32m1(vuint8m1_t src) {
+  return vreinterpret_v_u8m1_u32m1(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u8m2_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i8> [[SRC:%.*]] to <vscale x 4 x i32>
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m2_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i8> [[SRC:%.*]] to <vscale x 4 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vreinterpret_v_u8m2_u32m2(vuint8m2_t src) {
+  return vreinterpret_v_u8m2_u32m2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u8m4_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 32 x i8> [[SRC:%.*]] to <vscale x 8 x i32>
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m4_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 32 x i8> [[SRC:%.*]] to <vscale x 8 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vreinterpret_v_u8m4_u32m4(vuint8m4_t src) {
+  return vreinterpret_v_u8m4_u32m4(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u8m8_u32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 64 x i8> [[SRC:%.*]] to <vscale x 16 x i32>
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m8_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 64 x i8> [[SRC:%.*]] to <vscale x 16 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vreinterpret_v_u8m8_u32m8(vuint8m8_t src) {
+  return vreinterpret_v_u8m8_u32m8(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i8m1_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i8> [[SRC:%.*]] to <vscale x 1 x i64>
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m1_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i8> [[SRC:%.*]] to <vscale x 1 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vreinterpret_v_i8m1_i64m1(vint8m1_t src) {
+  return vreinterpret_v_i8m1_i64m1(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i8m2_i64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i8> [[SRC:%.*]] to <vscale x 2 x i64>
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m2_i64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i8> [[SRC:%.*]] to <vscale x 2 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vreinterpret_v_i8m2_i64m2(vint8m2_t src) {
+  return vreinterpret_v_i8m2_i64m2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i8m4_i64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 32 x i8> [[SRC:%.*]] to <vscale x 4 x i64>
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m4_i64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 32 x i8> [[SRC:%.*]] to <vscale x 4 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vreinterpret_v_i8m4_i64m4(vint8m4_t src) {
+  return vreinterpret_v_i8m4_i64m4(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i8m8_i64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 64 x i8> [[SRC:%.*]] to <vscale x 8 x i64>
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m8_i64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 64 x i8> [[SRC:%.*]] to <vscale x 8 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vreinterpret_v_i8m8_i64m8(vint8m8_t src) {
+  return vreinterpret_v_i8m8_i64m8(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u8m1_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i8> [[SRC:%.*]] to <vscale x 1 x i64>
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m1_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i8> [[SRC:%.*]] to <vscale x 1 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vreinterpret_v_u8m1_u64m1(vuint8m1_t src) {
+  return vreinterpret_v_u8m1_u64m1(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u8m2_u64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i8> [[SRC:%.*]] to <vscale x 2 x i64>
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m2_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i8> [[SRC:%.*]] to <vscale x 2 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vreinterpret_v_u8m2_u64m2(vuint8m2_t src) {
+  return vreinterpret_v_u8m2_u64m2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u8m4_u64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 32 x i8> [[SRC:%.*]] to <vscale x 4 x i64>
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m4_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 32 x i8> [[SRC:%.*]] to <vscale x 4 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vreinterpret_v_u8m4_u64m4(vuint8m4_t src) {
+  return vreinterpret_v_u8m4_u64m4(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u8m8_u64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 64 x i8> [[SRC:%.*]] to <vscale x 8 x i64>
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m8_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 64 x i8> [[SRC:%.*]] to <vscale x 8 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vreinterpret_v_u8m8_u64m8(vuint8m8_t src) {
+  return vreinterpret_v_u8m8_u64m8(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i16mf4_i8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i16> [[SRC:%.*]] to <vscale x 2 x i8>
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16mf4_i8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i16> [[SRC:%.*]] to <vscale x 2 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vreinterpret_v_i16mf4_i8mf4(vint16mf4_t src) {
+  return vreinterpret_v_i16mf4_i8mf4(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i16mf2_i8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i16> [[SRC:%.*]] to <vscale x 4 x i8>
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16mf2_i8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i16> [[SRC:%.*]] to <vscale x 4 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vreinterpret_v_i16mf2_i8mf2(vint16mf2_t src) {
+  return vreinterpret_v_i16mf2_i8mf2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i16m1_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i16> [[SRC:%.*]] to <vscale x 8 x i8>
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m1_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i16> [[SRC:%.*]] to <vscale x 8 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vreinterpret_v_i16m1_i8m1(vint16m1_t src) {
+  return vreinterpret_v_i16m1_i8m1(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i16m2_i8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i16> [[SRC:%.*]] to <vscale x 16 x i8>
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m2_i8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i16> [[SRC:%.*]] to <vscale x 16 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vreinterpret_v_i16m2_i8m2(vint16m2_t src) {
+  return vreinterpret_v_i16m2_i8m2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i16m4_i8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i16> [[SRC:%.*]] to <vscale x 32 x i8>
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m4_i8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i16> [[SRC:%.*]] to <vscale x 32 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vreinterpret_v_i16m4_i8m4(vint16m4_t src) {
+  return vreinterpret_v_i16m4_i8m4(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i16m8_i8m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 32 x i16> [[SRC:%.*]] to <vscale x 64 x i8>
+// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m8_i8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 32 x i16> [[SRC:%.*]] to <vscale x 64 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vreinterpret_v_i16m8_i8m8(vint16m8_t src) {
+  return vreinterpret_v_i16m8_i8m8(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u16mf4_u8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i16> [[SRC:%.*]] to <vscale x 2 x i8>
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16mf4_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i16> [[SRC:%.*]] to <vscale x 2 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vreinterpret_v_u16mf4_u8mf4(vuint16mf4_t src) {
+  return vreinterpret_v_u16mf4_u8mf4(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u16mf2_u8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i16> [[SRC:%.*]] to <vscale x 4 x i8>
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16mf2_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i16> [[SRC:%.*]] to <vscale x 4 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vreinterpret_v_u16mf2_u8mf2(vuint16mf2_t src) {
+  return vreinterpret_v_u16mf2_u8mf2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u16m1_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i16> [[SRC:%.*]] to <vscale x 8 x i8>
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m1_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i16> [[SRC:%.*]] to <vscale x 8 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vreinterpret_v_u16m1_u8m1(vuint16m1_t src) {
+  return vreinterpret_v_u16m1_u8m1(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u16m2_u8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i16> [[SRC:%.*]] to <vscale x 16 x i8>
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m2_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i16> [[SRC:%.*]] to <vscale x 16 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vreinterpret_v_u16m2_u8m2(vuint16m2_t src) {
+  return vreinterpret_v_u16m2_u8m2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u16m4_u8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i16> [[SRC:%.*]] to <vscale x 32 x i8>
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m4_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i16> [[SRC:%.*]] to <vscale x 32 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vreinterpret_v_u16m4_u8m4(vuint16m4_t src) {
+  return vreinterpret_v_u16m4_u8m4(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u16m8_u8m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 32 x i16> [[SRC:%.*]] to <vscale x 64 x i8>
+// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m8_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 32 x i16> [[SRC:%.*]] to <vscale x 64 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vreinterpret_v_u16m8_u8m8(vuint16m8_t src) {
+  return vreinterpret_v_u16m8_u8m8(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i16mf2_i32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i16> [[SRC:%.*]] to <vscale x 1 x i32>
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16mf2_i32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i16> [[SRC:%.*]] to <vscale x 1 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vreinterpret_v_i16mf2_i32mf2(vint16mf2_t src) {
+  return vreinterpret_v_i16mf2_i32mf2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i16m1_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i16> [[SRC:%.*]] to <vscale x 2 x i32>
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m1_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i16> [[SRC:%.*]] to <vscale x 2 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vreinterpret_v_i16m1_i32m1(vint16m1_t src) {
+  return vreinterpret_v_i16m1_i32m1(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i16m2_i32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i16> [[SRC:%.*]] to <vscale x 4 x i32>
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m2_i32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i16> [[SRC:%.*]] to <vscale x 4 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vreinterpret_v_i16m2_i32m2(vint16m2_t src) {
+  return vreinterpret_v_i16m2_i32m2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i16m4_i32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i16> [[SRC:%.*]] to <vscale x 8 x i32>
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m4_i32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i16> [[SRC:%.*]] to <vscale x 8 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vreinterpret_v_i16m4_i32m4(vint16m4_t src) {
+  return vreinterpret_v_i16m4_i32m4(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i16m8_i32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 32 x i16> [[SRC:%.*]] to <vscale x 16 x i32>
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m8_i32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 32 x i16> [[SRC:%.*]] to <vscale x 16 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vreinterpret_v_i16m8_i32m8(vint16m8_t src) {
+  return vreinterpret_v_i16m8_i32m8(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u16mf2_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i16> [[SRC:%.*]] to <vscale x 1 x i32>
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16mf2_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i16> [[SRC:%.*]] to <vscale x 1 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vreinterpret_v_u16mf2_u32mf2(vuint16mf2_t src) {
+  return vreinterpret_v_u16mf2_u32mf2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u16m1_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i16> [[SRC:%.*]] to <vscale x 2 x i32>
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m1_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i16> [[SRC:%.*]] to <vscale x 2 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vreinterpret_v_u16m1_u32m1(vuint16m1_t src) {
+  return vreinterpret_v_u16m1_u32m1(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u16m2_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i16> [[SRC:%.*]] to <vscale x 4 x i32>
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m2_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i16> [[SRC:%.*]] to <vscale x 4 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vreinterpret_v_u16m2_u32m2(vuint16m2_t src) {
+  return vreinterpret_v_u16m2_u32m2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u16m4_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i16> [[SRC:%.*]] to <vscale x 8 x i32>
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m4_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i16> [[SRC:%.*]] to <vscale x 8 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vreinterpret_v_u16m4_u32m4(vuint16m4_t src) {
+  return vreinterpret_v_u16m4_u32m4(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u16m8_u32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 32 x i16> [[SRC:%.*]] to <vscale x 16 x i32>
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m8_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 32 x i16> [[SRC:%.*]] to <vscale x 16 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vreinterpret_v_u16m8_u32m8(vuint16m8_t src) {
+  return vreinterpret_v_u16m8_u32m8(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i16m1_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i16> [[SRC:%.*]] to <vscale x 1 x i64>
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m1_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i16> [[SRC:%.*]] to <vscale x 1 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vreinterpret_v_i16m1_i64m1(vint16m1_t src) {
+  return vreinterpret_v_i16m1_i64m1(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i16m2_i64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i16> [[SRC:%.*]] to <vscale x 2 x i64>
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m2_i64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i16> [[SRC:%.*]] to <vscale x 2 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vreinterpret_v_i16m2_i64m2(vint16m2_t src) {
+  return vreinterpret_v_i16m2_i64m2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i16m4_i64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i16> [[SRC:%.*]] to <vscale x 4 x i64>
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m4_i64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i16> [[SRC:%.*]] to <vscale x 4 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vreinterpret_v_i16m4_i64m4(vint16m4_t src) {
+  return vreinterpret_v_i16m4_i64m4(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i16m8_i64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 32 x i16> [[SRC:%.*]] to <vscale x 8 x i64>
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m8_i64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 32 x i16> [[SRC:%.*]] to <vscale x 8 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vreinterpret_v_i16m8_i64m8(vint16m8_t src) {
+  return vreinterpret_v_i16m8_i64m8(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u16m1_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i16> [[SRC:%.*]] to <vscale x 1 x i64>
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m1_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i16> [[SRC:%.*]] to <vscale x 1 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vreinterpret_v_u16m1_u64m1(vuint16m1_t src) {
+  return vreinterpret_v_u16m1_u64m1(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u16m2_u64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i16> [[SRC:%.*]] to <vscale x 2 x i64>
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m2_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i16> [[SRC:%.*]] to <vscale x 2 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vreinterpret_v_u16m2_u64m2(vuint16m2_t src) {
+  return vreinterpret_v_u16m2_u64m2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u16m4_u64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i16> [[SRC:%.*]] to <vscale x 4 x i64>
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m4_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i16> [[SRC:%.*]] to <vscale x 4 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vreinterpret_v_u16m4_u64m4(vuint16m4_t src) {
+  return vreinterpret_v_u16m4_u64m4(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u16m8_u64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 32 x i16> [[SRC:%.*]] to <vscale x 8 x i64>
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m8_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 32 x i16> [[SRC:%.*]] to <vscale x 8 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vreinterpret_v_u16m8_u64m8(vuint16m8_t src) {
+  return vreinterpret_v_u16m8_u64m8(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i32mf2_i8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i32> [[SRC:%.*]] to <vscale x 4 x i8>
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32mf2_i8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i32> [[SRC:%.*]] to <vscale x 4 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vreinterpret_v_i32mf2_i8mf2(vint32mf2_t src) {
+  return vreinterpret_v_i32mf2_i8mf2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i32m1_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i32> [[SRC:%.*]] to <vscale x 8 x i8>
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m1_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i32> [[SRC:%.*]] to <vscale x 8 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vreinterpret_v_i32m1_i8m1(vint32m1_t src) {
+  return vreinterpret_v_i32m1_i8m1(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i32m2_i8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i32> [[SRC:%.*]] to <vscale x 16 x i8>
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m2_i8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i32> [[SRC:%.*]] to <vscale x 16 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vreinterpret_v_i32m2_i8m2(vint32m2_t src) {
+  return vreinterpret_v_i32m2_i8m2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i32m4_i8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i32> [[SRC:%.*]] to <vscale x 32 x i8>
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m4_i8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i32> [[SRC:%.*]] to <vscale x 32 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vreinterpret_v_i32m4_i8m4(vint32m4_t src) {
+  return vreinterpret_v_i32m4_i8m4(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i32m8_i8m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i32> [[SRC:%.*]] to <vscale x 64 x i8>
+// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m8_i8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i32> [[SRC:%.*]] to <vscale x 64 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vreinterpret_v_i32m8_i8m8(vint32m8_t src) {
+  return vreinterpret_v_i32m8_i8m8(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u32mf2_u8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i32> [[SRC:%.*]] to <vscale x 4 x i8>
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32mf2_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i32> [[SRC:%.*]] to <vscale x 4 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vreinterpret_v_u32mf2_u8mf2(vuint32mf2_t src) {
+  return vreinterpret_v_u32mf2_u8mf2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u32m1_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i32> [[SRC:%.*]] to <vscale x 8 x i8>
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m1_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i32> [[SRC:%.*]] to <vscale x 8 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vreinterpret_v_u32m1_u8m1(vuint32m1_t src) {
+  return vreinterpret_v_u32m1_u8m1(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u32m2_u8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i32> [[SRC:%.*]] to <vscale x 16 x i8>
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m2_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i32> [[SRC:%.*]] to <vscale x 16 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vreinterpret_v_u32m2_u8m2(vuint32m2_t src) {
+  return vreinterpret_v_u32m2_u8m2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u32m4_u8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i32> [[SRC:%.*]] to <vscale x 32 x i8>
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m4_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i32> [[SRC:%.*]] to <vscale x 32 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vreinterpret_v_u32m4_u8m4(vuint32m4_t src) {
+  return vreinterpret_v_u32m4_u8m4(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u32m8_u8m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i32> [[SRC:%.*]] to <vscale x 64 x i8>
+// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m8_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i32> [[SRC:%.*]] to <vscale x 64 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vreinterpret_v_u32m8_u8m8(vuint32m8_t src) {
+  return vreinterpret_v_u32m8_u8m8(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i32mf2_i16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i32> [[SRC:%.*]] to <vscale x 2 x i16>
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32mf2_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i32> [[SRC:%.*]] to <vscale x 2 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vreinterpret_v_i32mf2_i16mf2(vint32mf2_t src) {
+  return vreinterpret_v_i32mf2_i16mf2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i32m1_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i32> [[SRC:%.*]] to <vscale x 4 x i16>
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m1_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i32> [[SRC:%.*]] to <vscale x 4 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vreinterpret_v_i32m1_i16m1(vint32m1_t src) {
+  return vreinterpret_v_i32m1_i16m1(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i32m2_i16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i32> [[SRC:%.*]] to <vscale x 8 x i16>
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m2_i16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i32> [[SRC:%.*]] to <vscale x 8 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vreinterpret_v_i32m2_i16m2(vint32m2_t src) {
+  return vreinterpret_v_i32m2_i16m2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i32m4_i16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i32> [[SRC:%.*]] to <vscale x 16 x i16>
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m4_i16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i32> [[SRC:%.*]] to <vscale x 16 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vreinterpret_v_i32m4_i16m4(vint32m4_t src) {
+  return vreinterpret_v_i32m4_i16m4(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i32m8_i16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i32> [[SRC:%.*]] to <vscale x 32 x i16>
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m8_i16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i32> [[SRC:%.*]] to <vscale x 32 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vreinterpret_v_i32m8_i16m8(vint32m8_t src) {
+  return vreinterpret_v_i32m8_i16m8(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u32mf2_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i32> [[SRC:%.*]] to <vscale x 2 x i16>
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32mf2_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i32> [[SRC:%.*]] to <vscale x 2 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vreinterpret_v_u32mf2_u16mf2(vuint32mf2_t src) {
+  return vreinterpret_v_u32mf2_u16mf2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u32m1_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i32> [[SRC:%.*]] to <vscale x 4 x i16>
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m1_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i32> [[SRC:%.*]] to <vscale x 4 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vreinterpret_v_u32m1_u16m1(vuint32m1_t src) {
+  return vreinterpret_v_u32m1_u16m1(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u32m2_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i32> [[SRC:%.*]] to <vscale x 8 x i16>
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m2_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i32> [[SRC:%.*]] to <vscale x 8 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vreinterpret_v_u32m2_u16m2(vuint32m2_t src) {
+  return vreinterpret_v_u32m2_u16m2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u32m4_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i32> [[SRC:%.*]] to <vscale x 16 x i16>
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m4_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i32> [[SRC:%.*]] to <vscale x 16 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vreinterpret_v_u32m4_u16m4(vuint32m4_t src) {
+  return vreinterpret_v_u32m4_u16m4(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u32m8_u16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i32> [[SRC:%.*]] to <vscale x 32 x i16>
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m8_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i32> [[SRC:%.*]] to <vscale x 32 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vreinterpret_v_u32m8_u16m8(vuint32m8_t src) {
+  return vreinterpret_v_u32m8_u16m8(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i32m1_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i32> [[SRC:%.*]] to <vscale x 1 x i64>
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m1_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i32> [[SRC:%.*]] to <vscale x 1 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vreinterpret_v_i32m1_i64m1(vint32m1_t src) {
+  return vreinterpret_v_i32m1_i64m1(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i32m2_i64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i32> [[SRC:%.*]] to <vscale x 2 x i64>
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m2_i64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i32> [[SRC:%.*]] to <vscale x 2 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vreinterpret_v_i32m2_i64m2(vint32m2_t src) {
+  return vreinterpret_v_i32m2_i64m2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i32m4_i64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i32> [[SRC:%.*]] to <vscale x 4 x i64>
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m4_i64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i32> [[SRC:%.*]] to <vscale x 4 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vreinterpret_v_i32m4_i64m4(vint32m4_t src) {
+  return vreinterpret_v_i32m4_i64m4(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i32m8_i64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i32> [[SRC:%.*]] to <vscale x 8 x i64>
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m8_i64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i32> [[SRC:%.*]] to <vscale x 8 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vreinterpret_v_i32m8_i64m8(vint32m8_t src) {
+  return vreinterpret_v_i32m8_i64m8(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u32m1_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i32> [[SRC:%.*]] to <vscale x 1 x i64>
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m1_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i32> [[SRC:%.*]] to <vscale x 1 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vreinterpret_v_u32m1_u64m1(vuint32m1_t src) {
+  return vreinterpret_v_u32m1_u64m1(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u32m2_u64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i32> [[SRC:%.*]] to <vscale x 2 x i64>
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m2_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i32> [[SRC:%.*]] to <vscale x 2 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vreinterpret_v_u32m2_u64m2(vuint32m2_t src) {
+  return vreinterpret_v_u32m2_u64m2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u32m4_u64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i32> [[SRC:%.*]] to <vscale x 4 x i64>
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m4_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i32> [[SRC:%.*]] to <vscale x 4 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vreinterpret_v_u32m4_u64m4(vuint32m4_t src) {
+  return vreinterpret_v_u32m4_u64m4(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u32m8_u64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i32> [[SRC:%.*]] to <vscale x 8 x i64>
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m8_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i32> [[SRC:%.*]] to <vscale x 8 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vreinterpret_v_u32m8_u64m8(vuint32m8_t src) {
+  return vreinterpret_v_u32m8_u64m8(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i64m1_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i64> [[SRC:%.*]] to <vscale x 8 x i8>
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m1_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i64> [[SRC:%.*]] to <vscale x 8 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vreinterpret_v_i64m1_i8m1(vint64m1_t src) {
+  return vreinterpret_v_i64m1_i8m1(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i64m2_i8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i64> [[SRC:%.*]] to <vscale x 16 x i8>
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m2_i8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i64> [[SRC:%.*]] to <vscale x 16 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vreinterpret_v_i64m2_i8m2(vint64m2_t src) {
+  return vreinterpret_v_i64m2_i8m2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i64m4_i8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i64> [[SRC:%.*]] to <vscale x 32 x i8>
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m4_i8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i64> [[SRC:%.*]] to <vscale x 32 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vreinterpret_v_i64m4_i8m4(vint64m4_t src) {
+  return vreinterpret_v_i64m4_i8m4(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i64m8_i8m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i64> [[SRC:%.*]] to <vscale x 64 x i8>
+// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m8_i8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i64> [[SRC:%.*]] to <vscale x 64 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vreinterpret_v_i64m8_i8m8(vint64m8_t src) {
+  return vreinterpret_v_i64m8_i8m8(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u64m1_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i64> [[SRC:%.*]] to <vscale x 8 x i8>
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m1_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i64> [[SRC:%.*]] to <vscale x 8 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vreinterpret_v_u64m1_u8m1(vuint64m1_t src) {
+  return vreinterpret_v_u64m1_u8m1(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u64m2_u8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i64> [[SRC:%.*]] to <vscale x 16 x i8>
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m2_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i64> [[SRC:%.*]] to <vscale x 16 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vreinterpret_v_u64m2_u8m2(vuint64m2_t src) {
+  return vreinterpret_v_u64m2_u8m2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u64m4_u8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i64> [[SRC:%.*]] to <vscale x 32 x i8>
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m4_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i64> [[SRC:%.*]] to <vscale x 32 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vreinterpret_v_u64m4_u8m4(vuint64m4_t src) {
+  return vreinterpret_v_u64m4_u8m4(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u64m8_u8m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i64> [[SRC:%.*]] to <vscale x 64 x i8>
+// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m8_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i64> [[SRC:%.*]] to <vscale x 64 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vreinterpret_v_u64m8_u8m8(vuint64m8_t src) {
+  return vreinterpret_v_u64m8_u8m8(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i64m1_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i64> [[SRC:%.*]] to <vscale x 4 x i16>
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m1_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i64> [[SRC:%.*]] to <vscale x 4 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vreinterpret_v_i64m1_i16m1(vint64m1_t src) {
+  return vreinterpret_v_i64m1_i16m1(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i64m2_i16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i64> [[SRC:%.*]] to <vscale x 8 x i16>
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m2_i16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i64> [[SRC:%.*]] to <vscale x 8 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vreinterpret_v_i64m2_i16m2(vint64m2_t src) {
+  return vreinterpret_v_i64m2_i16m2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i64m4_i16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i64> [[SRC:%.*]] to <vscale x 16 x i16>
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m4_i16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i64> [[SRC:%.*]] to <vscale x 16 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vreinterpret_v_i64m4_i16m4(vint64m4_t src) {
+  return vreinterpret_v_i64m4_i16m4(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i64m8_i16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i64> [[SRC:%.*]] to <vscale x 32 x i16>
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m8_i16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i64> [[SRC:%.*]] to <vscale x 32 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vreinterpret_v_i64m8_i16m8(vint64m8_t src) {
+  return vreinterpret_v_i64m8_i16m8(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u64m1_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i64> [[SRC:%.*]] to <vscale x 4 x i16>
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m1_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i64> [[SRC:%.*]] to <vscale x 4 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vreinterpret_v_u64m1_u16m1(vuint64m1_t src) {
+  return vreinterpret_v_u64m1_u16m1(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u64m2_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i64> [[SRC:%.*]] to <vscale x 8 x i16>
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m2_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i64> [[SRC:%.*]] to <vscale x 8 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vreinterpret_v_u64m2_u16m2(vuint64m2_t src) {
+  return vreinterpret_v_u64m2_u16m2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u64m4_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i64> [[SRC:%.*]] to <vscale x 16 x i16>
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m4_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i64> [[SRC:%.*]] to <vscale x 16 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vreinterpret_v_u64m4_u16m4(vuint64m4_t src) {
+  return vreinterpret_v_u64m4_u16m4(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u64m8_u16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i64> [[SRC:%.*]] to <vscale x 32 x i16>
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m8_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i64> [[SRC:%.*]] to <vscale x 32 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vreinterpret_v_u64m8_u16m8(vuint64m8_t src) {
+  return vreinterpret_v_u64m8_u16m8(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i64m1_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i64> [[SRC:%.*]] to <vscale x 2 x i32>
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m1_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i64> [[SRC:%.*]] to <vscale x 2 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vreinterpret_v_i64m1_i32m1(vint64m1_t src) {
+  return vreinterpret_v_i64m1_i32m1(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i64m2_i32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i64> [[SRC:%.*]] to <vscale x 4 x i32>
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m2_i32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i64> [[SRC:%.*]] to <vscale x 4 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vreinterpret_v_i64m2_i32m2(vint64m2_t src) {
+  return vreinterpret_v_i64m2_i32m2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i64m4_i32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i64> [[SRC:%.*]] to <vscale x 8 x i32>
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m4_i32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i64> [[SRC:%.*]] to <vscale x 8 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vreinterpret_v_i64m4_i32m4(vint64m4_t src) {
+  return vreinterpret_v_i64m4_i32m4(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i64m8_i32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i64> [[SRC:%.*]] to <vscale x 16 x i32>
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m8_i32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i64> [[SRC:%.*]] to <vscale x 16 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vreinterpret_v_i64m8_i32m8(vint64m8_t src) {
+  return vreinterpret_v_i64m8_i32m8(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u64m1_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i64> [[SRC:%.*]] to <vscale x 2 x i32>
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m1_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i64> [[SRC:%.*]] to <vscale x 2 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vreinterpret_v_u64m1_u32m1(vuint64m1_t src) {
+  return vreinterpret_v_u64m1_u32m1(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u64m2_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i64> [[SRC:%.*]] to <vscale x 4 x i32>
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m2_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i64> [[SRC:%.*]] to <vscale x 4 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vreinterpret_v_u64m2_u32m2(vuint64m2_t src) {
+  return vreinterpret_v_u64m2_u32m2(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u64m4_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i64> [[SRC:%.*]] to <vscale x 8 x i32>
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m4_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i64> [[SRC:%.*]] to <vscale x 8 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vreinterpret_v_u64m4_u32m4(vuint64m4_t src) {
+  return vreinterpret_v_u64m4_u32m4(src);
+}
+
+// CHECK-RV32-LABEL: @test_vreinterpret_v_u64m8_u32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i64> [[SRC:%.*]] to <vscale x 16 x i32>
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m8_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i64> [[SRC:%.*]] to <vscale x 16 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vreinterpret_v_u64m8_u32m8(vuint64m8_t src) {
+  return vreinterpret_v_u64m8_u32m8(src);
+}
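
As the CHECK lines above show, every vreinterpret_v_* builtin lowers to a single IR bitcast, so the cast is bit-preserving and costs nothing at run time. A minimal usage sketch follows; the round-trip helper is hypothetical (not part of this patch) and assumes the inverse builtin vreinterpret_v_u16m2_u64m2 produced by the same FixedSEW transformer:

#include <riscv_vector.h>

// View a u64m2 register group as 16-bit lanes, then cast back.
// Both calls compile to IR bitcasts, so the original bits survive.
vuint64m2_t roundtrip_u64m2(vuint64m2_t src) {
  vuint16m2_t lanes = vreinterpret_v_u64m2_u16m2(src); // <vscale x 2 x i64> -> <vscale x 8 x i16>
  return vreinterpret_v_u16m2_u64m2(lanes);            // inverse cast restores src
}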
diff --git a/test/CodeGen/RISCV/rvv-intrinsics/vundefined.c b/test/CodeGen/RISCV/rvv-intrinsics/vundefined.c
new file mode 100644
index 0000000..a322b74
--- /dev/null
+++ b/test/CodeGen/RISCV/rvv-intrinsics/vundefined.c
@@ -0,0 +1,538 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vundefined_i8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> undef
+//
+// CHECK-RV64-LABEL: @test_vundefined_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> undef
+//
+vint8mf8_t test_vundefined_i8mf8() { return vundefined_i8mf8(); }
+
+// CHECK-RV32-LABEL: @test_vundefined_i8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> undef
+//
+// CHECK-RV64-LABEL: @test_vundefined_i8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> undef
+//
+vint8mf4_t test_vundefined_i8mf4() { return vundefined_i8mf4(); }
+
+// CHECK-RV32-LABEL: @test_vundefined_i8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> undef
+//
+// CHECK-RV64-LABEL: @test_vundefined_i8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> undef
+//
+vint8mf2_t test_vundefined_i8mf2() { return vundefined_i8mf2(); }
+
+// CHECK-RV32-LABEL: @test_vundefined_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> undef
+//
+// CHECK-RV64-LABEL: @test_vundefined_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> undef
+//
+vint8m1_t test_vundefined_i8m1() { return vundefined_i8m1(); }
+
+// CHECK-RV32-LABEL: @test_vundefined_i8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> undef
+//
+// CHECK-RV64-LABEL: @test_vundefined_i8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> undef
+//
+vint8m2_t test_vundefined_i8m2() { return vundefined_i8m2(); }
+
+// CHECK-RV32-LABEL: @test_vundefined_i8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> undef
+//
+// CHECK-RV64-LABEL: @test_vundefined_i8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> undef
+//
+vint8m4_t test_vundefined_i8m4() { return vundefined_i8m4(); }
+
+// CHECK-RV32-LABEL: @test_vundefined_i8m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> undef
+//
+// CHECK-RV64-LABEL: @test_vundefined_i8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> undef
+//
+vint8m8_t test_vundefined_i8m8() { return vundefined_i8m8(); }
+
+// CHECK-RV32-LABEL: @test_vundefined_i16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> undef
+//
+// CHECK-RV64-LABEL: @test_vundefined_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> undef
+//
+vint16mf4_t test_vundefined_i16mf4() { return vundefined_i16mf4(); }
+
+// CHECK-RV32-LABEL: @test_vundefined_i16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> undef
+//
+// CHECK-RV64-LABEL: @test_vundefined_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> undef
+//
+vint16mf2_t test_vundefined_i16mf2() { return vundefined_i16mf2(); }
+
+// CHECK-RV32-LABEL: @test_vundefined_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> undef
+//
+// CHECK-RV64-LABEL: @test_vundefined_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> undef
+//
+vint16m1_t test_vundefined_i16m1() { return vundefined_i16m1(); }
+
+// CHECK-RV32-LABEL: @test_vundefined_i16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> undef
+//
+// CHECK-RV64-LABEL: @test_vundefined_i16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> undef
+//
+vint16m2_t test_vundefined_i16m2() { return vundefined_i16m2(); }
+
+// CHECK-RV32-LABEL: @test_vundefined_i16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> undef
+//
+// CHECK-RV64-LABEL: @test_vundefined_i16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> undef
+//
+vint16m4_t test_vundefined_i16m4() { return vundefined_i16m4(); }
+
+// CHECK-RV32-LABEL: @test_vundefined_i16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> undef
+//
+// CHECK-RV64-LABEL: @test_vundefined_i16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> undef
+//
+vint16m8_t test_vundefined_i16m8() { return vundefined_i16m8(); }
+
+// CHECK-RV32-LABEL: @test_vundefined_i32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> undef
+//
+// CHECK-RV64-LABEL: @test_vundefined_i32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> undef
+//
+vint32mf2_t test_vundefined_i32mf2() { return vundefined_i32mf2(); }
+
+// CHECK-RV32-LABEL: @test_vundefined_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> undef
+//
+// CHECK-RV64-LABEL: @test_vundefined_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> undef
+//
+vint32m1_t test_vundefined_i32m1() { return vundefined_i32m1(); }
+
+// CHECK-RV32-LABEL: @test_vundefined_i32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> undef
+//
+// CHECK-RV64-LABEL: @test_vundefined_i32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> undef
+//
+vint32m2_t test_vundefined_i32m2() { return vundefined_i32m2(); }
+
+// CHECK-RV32-LABEL: @test_vundefined_i32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> undef
+//
+// CHECK-RV64-LABEL: @test_vundefined_i32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> undef
+//
+vint32m4_t test_vundefined_i32m4() { return vundefined_i32m4(); }
+
+// CHECK-RV32-LABEL: @test_vundefined_i32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> undef
+//
+// CHECK-RV64-LABEL: @test_vundefined_i32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> undef
+//
+vint32m8_t test_vundefined_i32m8() { return vundefined_i32m8(); }
+
+// CHECK-RV32-LABEL: @test_vundefined_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> undef
+//
+// CHECK-RV64-LABEL: @test_vundefined_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> undef
+//
+vint64m1_t test_vundefined_i64m1() { return vundefined_i64m1(); }
+
+// CHECK-RV32-LABEL: @test_vundefined_i64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> undef
+//
+// CHECK-RV64-LABEL: @test_vundefined_i64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> undef
+//
+vint64m2_t test_vundefined_i64m2() { return vundefined_i64m2(); }
+
+// CHECK-RV32-LABEL: @test_vundefined_i64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> undef
+//
+// CHECK-RV64-LABEL: @test_vundefined_i64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> undef
+//
+vint64m4_t test_vundefined_i64m4() { return vundefined_i64m4(); }
+
+// CHECK-RV32-LABEL: @test_vundefined_i64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> undef
+//
+// CHECK-RV64-LABEL: @test_vundefined_i64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> undef
+//
+vint64m8_t test_vundefined_i64m8() { return vundefined_i64m8(); }
+
+// CHECK-RV32-LABEL: @test_vundefined_u8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> undef
+//
+// CHECK-RV64-LABEL: @test_vundefined_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> undef
+//
+vuint8mf8_t test_vundefined_u8mf8() { return vundefined_u8mf8(); }
+
+// CHECK-RV32-LABEL: @test_vundefined_u8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> undef
+//
+// CHECK-RV64-LABEL: @test_vundefined_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> undef
+//
+vuint8mf4_t test_vundefined_u8mf4() { return vundefined_u8mf4(); }
+
+// CHECK-RV32-LABEL: @test_vundefined_u8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> undef
+//
+// CHECK-RV64-LABEL: @test_vundefined_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> undef
+//
+vuint8mf2_t test_vundefined_u8mf2() { return vundefined_u8mf2(); }
+
+// CHECK-RV32-LABEL: @test_vundefined_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> undef
+//
+// CHECK-RV64-LABEL: @test_vundefined_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> undef
+//
+vuint8m1_t test_vundefined_u8m1() { return vundefined_u8m1(); }
+
+// CHECK-RV32-LABEL: @test_vundefined_u8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> undef
+//
+// CHECK-RV64-LABEL: @test_vundefined_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> undef
+//
+vuint8m2_t test_vundefined_u8m2() { return vundefined_u8m2(); }
+
+// CHECK-RV32-LABEL: @test_vundefined_u8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> undef
+//
+// CHECK-RV64-LABEL: @test_vundefined_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> undef
+//
+vuint8m4_t test_vundefined_u8m4() { return vundefined_u8m4(); }
+
+// CHECK-RV32-LABEL: @test_vundefined_u8m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> undef
+//
+// CHECK-RV64-LABEL: @test_vundefined_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> undef
+//
+vuint8m8_t test_vundefined_u8m8() { return vundefined_u8m8(); }
+
+// CHECK-RV32-LABEL: @test_vundefined_u16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> undef
+//
+// CHECK-RV64-LABEL: @test_vundefined_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> undef
+//
+vuint16mf4_t test_vundefined_u16mf4() { return vundefined_u16mf4(); }
+
+// CHECK-RV32-LABEL: @test_vundefined_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> undef
+//
+// CHECK-RV64-LABEL: @test_vundefined_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> undef
+//
+vuint16mf2_t test_vundefined_u16mf2() { return vundefined_u16mf2(); }
+
+// CHECK-RV32-LABEL: @test_vundefined_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> undef
+//
+// CHECK-RV64-LABEL: @test_vundefined_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> undef
+//
+vuint16m1_t test_vundefined_u16m1() { return vundefined_u16m1(); }
+
+// CHECK-RV32-LABEL: @test_vundefined_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> undef
+//
+// CHECK-RV64-LABEL: @test_vundefined_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> undef
+//
+vuint16m2_t test_vundefined_u16m2() { return vundefined_u16m2(); }
+
+// CHECK-RV32-LABEL: @test_vundefined_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> undef
+//
+// CHECK-RV64-LABEL: @test_vundefined_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> undef
+//
+vuint16m4_t test_vundefined_u16m4() { return vundefined_u16m4(); }
+
+// CHECK-RV32-LABEL: @test_vundefined_u16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> undef
+//
+// CHECK-RV64-LABEL: @test_vundefined_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> undef
+//
+vuint16m8_t test_vundefined_u16m8() { return vundefined_u16m8(); }
+
+// CHECK-RV32-LABEL: @test_vundefined_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> undef
+//
+// CHECK-RV64-LABEL: @test_vundefined_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> undef
+//
+vuint32mf2_t test_vundefined_u32mf2() { return vundefined_u32mf2(); }
+
+// CHECK-RV32-LABEL: @test_vundefined_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> undef
+//
+// CHECK-RV64-LABEL: @test_vundefined_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> undef
+//
+vuint32m1_t test_vundefined_u32m1() { return vundefined_u32m1(); }
+
+// CHECK-RV32-LABEL: @test_vundefined_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> undef
+//
+// CHECK-RV64-LABEL: @test_vundefined_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> undef
+//
+vuint32m2_t test_vundefined_u32m2() { return vundefined_u32m2(); }
+
+// CHECK-RV32-LABEL: @test_vundefined_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> undef
+//
+// CHECK-RV64-LABEL: @test_vundefined_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> undef
+//
+vuint32m4_t test_vundefined_u32m4() { return vundefined_u32m4(); }
+
+// CHECK-RV32-LABEL: @test_vundefined_u32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> undef
+//
+// CHECK-RV64-LABEL: @test_vundefined_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> undef
+//
+vuint32m8_t test_vundefined_u32m8() { return vundefined_u32m8(); }
+
+// CHECK-RV32-LABEL: @test_vundefined_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> undef
+//
+// CHECK-RV64-LABEL: @test_vundefined_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> undef
+//
+vuint64m1_t test_vundefined_u64m1() { return vundefined_u64m1(); }
+
+// CHECK-RV32-LABEL: @test_vundefined_u64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> undef
+//
+// CHECK-RV64-LABEL: @test_vundefined_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> undef
+//
+vuint64m2_t test_vundefined_u64m2() { return vundefined_u64m2(); }
+
+// CHECK-RV32-LABEL: @test_vundefined_u64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> undef
+//
+// CHECK-RV64-LABEL: @test_vundefined_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> undef
+//
+vuint64m4_t test_vundefined_u64m4() { return vundefined_u64m4(); }
+
+// CHECK-RV32-LABEL: @test_vundefined_u64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> undef
+//
+// CHECK-RV64-LABEL: @test_vundefined_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> undef
+//
+vuint64m8_t test_vundefined_u64m8() { return vundefined_u64m8(); }
+
+// CHECK-RV32-LABEL: @test_vundefined_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 1 x float> undef
+//
+// CHECK-RV64-LABEL: @test_vundefined_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> undef
+//
+vfloat32mf2_t test_vundefined_f32mf2() { return vundefined_f32mf2(); }
+
+// CHECK-RV32-LABEL: @test_vundefined_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> undef
+//
+// CHECK-RV64-LABEL: @test_vundefined_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> undef
+//
+vfloat32m1_t test_vundefined_f32m1() { return vundefined_f32m1(); }
+
+// CHECK-RV32-LABEL: @test_vundefined_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 4 x float> undef
+//
+// CHECK-RV64-LABEL: @test_vundefined_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> undef
+//
+vfloat32m2_t test_vundefined_f32m2() { return vundefined_f32m2(); }
+
+// CHECK-RV32-LABEL: @test_vundefined_f32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 8 x float> undef
+//
+// CHECK-RV64-LABEL: @test_vundefined_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> undef
+//
+vfloat32m4_t test_vundefined_f32m4() { return vundefined_f32m4(); }
+
+// CHECK-RV32-LABEL: @test_vundefined_f32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 16 x float> undef
+//
+// CHECK-RV64-LABEL: @test_vundefined_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> undef
+//
+vfloat32m8_t test_vundefined_f32m8() { return vundefined_f32m8(); }
+
+// CHECK-RV32-LABEL: @test_vundefined_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> undef
+//
+// CHECK-RV64-LABEL: @test_vundefined_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> undef
+//
+vfloat64m1_t test_vundefined_f64m1() { return vundefined_f64m1(); }
+
+// CHECK-RV32-LABEL: @test_vundefined_f64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 2 x double> undef
+//
+// CHECK-RV64-LABEL: @test_vundefined_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> undef
+//
+vfloat64m2_t test_vundefined_f64m2() { return vundefined_f64m2(); }
+
+// CHECK-RV32-LABEL: @test_vundefined_f64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 4 x double> undef
+//
+// CHECK-RV64-LABEL: @test_vundefined_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> undef
+//
+vfloat64m4_t test_vundefined_f64m4() { return vundefined_f64m4(); }
+
+// CHECK-RV32-LABEL: @test_vundefined_f64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    ret <vscale x 8 x double> undef
+//
+// CHECK-RV64-LABEL: @test_vundefined_f64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> undef
+//
+vfloat64m8_t test_vundefined_f64m8() { return vundefined_f64m8(); }
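
Each vundefined_* builtin simply returns undef at the IR level, handing the optimizer a vector value with no initialization cost. A minimal usage sketch (the helper below is hypothetical, not part of this patch; only vundefined_i32m1 comes from the tests above):

#include <riscv_vector.h>

// Avoid paying for initialization on the path where no value is
// produced; vundefined_i32m1() lowers to `ret <vscale x 2 x i32> undef`.
vint32m1_t select_or_undef(vint32m1_t v, _Bool valid) {
  vint32m1_t result = vundefined_i32m1();
  if (valid)
    result = v;
  return result;
}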
diff --git a/utils/TableGen/RISCVVEmitter.cpp b/utils/TableGen/RISCVVEmitter.cpp
index f13eee2..594e954 100644
--- a/utils/TableGen/RISCVVEmitter.cpp
+++ b/utils/TableGen/RISCVVEmitter.cpp
@@ -31,11 +31,8 @@
 namespace {
 
 // Exponential LMUL
-class LMULType {
-private:
+struct LMULType {
   int Log2LMUL;
-
-public:
   LMULType(int Log2LMUL);
   // Return the C/C++ string representation of LMUL
   std::string str() const;
@@ -188,6 +185,7 @@
   bool isMask() const { return IsMask; }
   size_t getNumOperand() const { return InputTypes.size(); }
   StringRef getIRName() const { return IRName; }
+  StringRef getManualCodegen() const { return ManualCodegen; }
   uint8_t getRISCVExtensions() const { return RISCVExtensions; }
 
   // Return the type string for a BUILTIN() macro in Builtins.def.
@@ -655,6 +653,20 @@
         PrintFatalError(
             "Illegal type transformer for Complex type transformer");
     };
+    auto ComputeFixedLog2LMUL =
+        [&](StringRef Value,
+            std::function<bool(const int32_t &, const int32_t &)> Compare) {
+          int32_t Log2LMUL;
+          Value.getAsInteger(10, Log2LMUL);
+          if (!Compare(Log2LMUL, LMUL.Log2LMUL)) {
+            ScalarType = Invalid;
+            return false;
+          }
+          // Update to the new LMUL
+          LMUL = LMULType(Log2LMUL);
+          UpdateAndCheckComplexProto();
+          return true;
+        };
     auto ComplexTT = ComplexType.split(":");
     if (ComplexTT.first == "Log2EEW") {
       uint32_t Log2EEW;
@@ -665,6 +677,25 @@
       ElementBitwidth = 1 << Log2EEW;
       ScalarType = ScalarTypeKind::SignedInteger;
       UpdateAndCheckComplexProto();
+    } else if (ComplexTT.first == "FixedSEW") {
+      uint32_t NewSEW;
+      ComplexTT.second.getAsInteger(10, NewSEW);
+      // Set invalid type if src and dst SEW are the same.
+      if (ElementBitwidth == NewSEW) {
+        ScalarType = Invalid;
+        return;
+      }
+      // Update to the new SEW
+      ElementBitwidth = NewSEW;
+      UpdateAndCheckComplexProto();
+    } else if (ComplexTT.first == "LFixedLog2LMUL") {
+      // The new LMUL should be larger than the old one
+      if (!ComputeFixedLog2LMUL(ComplexTT.second, std::greater<int32_t>()))
+        return;
+    } else if (ComplexTT.first == "SFixedLog2LMUL") {
+      // The new LMUL should be smaller than the old one
+      if (!ComputeFixedLog2LMUL(ComplexTT.second, std::less<int32_t>()))
+        return;
     } else {
       PrintFatalError("Illegal complex type transformers!");
     }
@@ -806,8 +837,8 @@
 }
 
 void RVVIntrinsic::emitCodeGenSwitchBody(raw_ostream &OS) const {
-
-  OS << "  ID = Intrinsic::riscv_" + getIRName() + ";\n";
+  if (!getIRName().empty())
+    OS << "  ID = Intrinsic::riscv_" + getIRName() + ";\n";
   if (hasManualCodegen()) {
     OS << ManualCodegen;
     OS << "break;\n";
@@ -1007,18 +1038,20 @@
 void RVVEmitter::createCodeGen(raw_ostream &OS) {
   std::vector<std::unique_ptr<RVVIntrinsic>> Defs;
   createRVVIntrinsics(Defs);
-
-  // The same intrinsic IR name has the same switch body.
+  // The IR name could be empty; using a stable sort preserves the relative
+  // order.
   std::stable_sort(Defs.begin(), Defs.end(),
                    [](const std::unique_ptr<RVVIntrinsic> &A,
                       const std::unique_ptr<RVVIntrinsic> &B) {
                      return A->getIRName() < B->getIRName();
                    });
-  // Print switch body when the ir name changes from previous iteration.
+  // Print the switch body when the IR name or the ManualCodegen changes from
+  // the previous iteration.
   RVVIntrinsic *PrevDef = Defs.begin()->get();
   for (auto &Def : Defs) {
     StringRef CurIRName = Def->getIRName();
-    if (CurIRName != PrevDef->getIRName()) {
+    if (CurIRName != PrevDef->getIRName() ||
+        (CurIRName.empty() &&
+         Def->getManualCodegen() != PrevDef->getManualCodegen())) {
       PrevDef->emitCodeGenSwitchBody(OS);
     }
     PrevDef = Def.get();