[RISCV] Remove redundant test cases for index segment store (8/8).
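
The cases removed here all exercise vsuxseg2 intrinsics whose index vector
has a different element count than the stored data vector (for example,
nxv16i16 data indexed by nxv32i16, nxv4i32, or nxv1i64 vectors); only the
combinations where the data and index element counts match are kept. As an
illustration taken straight from the patch, this declaration stays because
both the data and the index have 16 elements:

  declare void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i8(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 16 x i8>, i64)

whereas @llvm.riscv.vsuxseg2.nxv16i16.nxv1i64, which pairs 16-element data
with a 1-element index, is deleted below.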

Differential Revision: https://reviews.llvm.org/D97026

GitOrigin-RevId: 0ab3558b25d6ba0b4606d3831c9bb1d98c17d113
diff --git a/test/CodeGen/RISCV/rvv/vsuxseg-rv64.ll b/test/CodeGen/RISCV/rvv/vsuxseg-rv64.ll
index 04fe0bc..b8414bf 100644
--- a/test/CodeGen/RISCV/rvv/vsuxseg-rv64.ll
+++ b/test/CodeGen/RISCV/rvv/vsuxseg-rv64.ll
@@ -33,66 +33,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv16i16.nxv32i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv32i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 32 x i16>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg2_nxv16i16_nxv32i16(<vscale x 16 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16i16.nxv32i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv16i16_nxv32i16(<vscale x 16 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv32i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv16i16.nxv4i32(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv4i32(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 4 x i32>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg2_nxv16i16_nxv4i32(<vscale x 16 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv2r.v v26, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v26
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16i16.nxv4i32(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv16i16_nxv4i32(<vscale x 16 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv2r.v v26, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v26, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv4i32(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i8(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 16 x i8>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i8(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 16 x i8>, <vscale x 16 x i1>, i64)
 
@@ -124,467 +64,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv16i16.nxv1i64(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv1i64(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 1 x i64>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg2_nxv16i16_nxv1i64(<vscale x 16 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16i16.nxv1i64(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv16i16_nxv1i64(<vscale x 16 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv1i64(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv16i16.nxv1i32(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv1i32(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 1 x i32>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg2_nxv16i16_nxv1i32(<vscale x 16 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16i16.nxv1i32(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv16i16_nxv1i32(<vscale x 16 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv1i32(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv16i16.nxv8i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv8i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 8 x i16>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg2_nxv16i16_nxv8i16(<vscale x 16 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv2r.v v26, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v26
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16i16.nxv8i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv16i16_nxv8i16(<vscale x 16 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv2r.v v26, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v26, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv8i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv16i16.nxv4i8(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv4i8(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 4 x i8>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg2_nxv16i16_nxv4i8(<vscale x 16 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16i16.nxv4i8(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv16i16_nxv4i8(<vscale x 16 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv4i8(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv16i16.nxv1i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv1i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 1 x i16>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg2_nxv16i16_nxv1i16(<vscale x 16 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16i16.nxv1i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv16i16_nxv1i16(<vscale x 16 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv1i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv16i16.nxv2i32(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv2i32(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 2 x i32>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg2_nxv16i16_nxv2i32(<vscale x 16 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16i16.nxv2i32(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv16i16_nxv2i32(<vscale x 16 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv2i32(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv16i16.nxv8i8(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv8i8(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 8 x i8>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg2_nxv16i16_nxv8i8(<vscale x 16 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16i16.nxv8i8(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv16i16_nxv8i8(<vscale x 16 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv8i8(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv16i16.nxv4i64(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv4i64(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 4 x i64>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg2_nxv16i16_nxv4i64(<vscale x 16 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v28, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v28
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16i16.nxv4i64(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv16i16_nxv4i64(<vscale x 16 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v28, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v28, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv4i64(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv16i16.nxv64i8(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv64i8(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 64 x i8>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg2_nxv16i16_nxv64i8(<vscale x 16 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16i16.nxv64i8(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv16i16_nxv64i8(<vscale x 16 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv64i8(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv16i16.nxv4i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv4i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 4 x i16>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg2_nxv16i16_nxv4i16(<vscale x 16 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16i16.nxv4i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv16i16_nxv4i16(<vscale x 16 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv4i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv16i16.nxv8i64(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv8i64(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 8 x i64>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg2_nxv16i16_nxv8i64(<vscale x 16 x i16> %val, i16* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16i16.nxv8i64(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv16i16_nxv8i64(<vscale x 16 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv8i64(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv16i16.nxv1i8(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv1i8(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 1 x i8>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg2_nxv16i16_nxv1i8(<vscale x 16 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16i16.nxv1i8(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv16i16_nxv1i8(<vscale x 16 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv1i8(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv16i16.nxv2i8(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv2i8(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 2 x i8>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg2_nxv16i16_nxv2i8(<vscale x 16 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16i16.nxv2i8(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv16i16_nxv2i8(<vscale x 16 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv2i8(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv16i16.nxv8i32(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv8i32(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 8 x i32>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg2_nxv16i16_nxv8i32(<vscale x 16 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v28, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v28
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16i16.nxv8i32(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv16i16_nxv8i32(<vscale x 16 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v28, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v28, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv8i32(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv16i16.nxv32i8(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv32i8(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 32 x i8>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg2_nxv16i16_nxv32i8(<vscale x 16 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v28, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v28
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16i16.nxv32i8(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv16i16_nxv32i8(<vscale x 16 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v28, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v28, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv32i8(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i32(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 16 x i32>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i32(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 16 x i32>, <vscale x 16 x i1>, i64)
 
@@ -614,126 +93,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv16i16.nxv2i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv2i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 2 x i16>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg2_nxv16i16_nxv2i16(<vscale x 16 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16i16.nxv2i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv16i16_nxv2i16(<vscale x 16 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv2i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv16i16.nxv2i64(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv2i64(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 2 x i64>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg2_nxv16i16_nxv2i64(<vscale x 16 x i16> %val, i16* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv2r.v v26, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v26
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16i16.nxv2i64(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv16i16_nxv2i64(<vscale x 16 x i16> %val, i16* %base, <vscale x 2 x i64> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv2r.v v26, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v26, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv2i64(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 2 x i64> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4i32.nxv16i16(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv16i16(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 16 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i32_nxv16i16(<vscale x 4 x i32> %val, i32* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i32.nxv16i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i32_nxv16i16(<vscale x 4 x i32> %val, i32* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv16i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4i32.nxv32i16(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv32i16(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 32 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i32_nxv32i16(<vscale x 4 x i32> %val, i32* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i32.nxv32i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i32_nxv32i16(<vscale x 4 x i32> %val, i32* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv32i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i32>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i32>, <vscale x 4 x i1>, i64)
 
@@ -765,130 +124,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv4i32.nxv16i8(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv16i8(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 16 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i32_nxv16i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v26, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v26
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i32.nxv16i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i32_nxv16i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v26, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v26, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv16i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4i32.nxv1i64(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv1i64(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 1 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i32_nxv1i64(<vscale x 4 x i32> %val, i32* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i32.nxv1i64(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i32_nxv1i64(<vscale x 4 x i32> %val, i32* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv1i64(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4i32.nxv1i32(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv1i32(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 1 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i32_nxv1i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i32.nxv1i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i32_nxv1i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv1i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4i32.nxv8i16(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv8i16(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 8 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i32_nxv8i16(<vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v26, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v26
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i32.nxv8i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i32_nxv8i16(<vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v26, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v26, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv8i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i8(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i8>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i8(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i8>, <vscale x 4 x i1>, i64)
 
@@ -920,99 +155,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv4i32.nxv1i16(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv1i16(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 1 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i32_nxv1i16(<vscale x 4 x i32> %val, i32* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i32.nxv1i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i32_nxv1i16(<vscale x 4 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv1i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4i32.nxv2i32(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv2i32(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 2 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i32_nxv2i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i32.nxv2i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i32_nxv2i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv2i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4i32.nxv8i8(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv8i8(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 8 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i32_nxv8i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i32.nxv8i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i32_nxv8i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv8i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i64(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i64>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i64(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i64>, <vscale x 4 x i1>, i64)
 
@@ -1042,35 +184,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv4i32.nxv64i8(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv64i8(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 64 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i32_nxv64i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i32.nxv64i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i32_nxv64i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv64i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i16(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i16>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i16(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i16>, <vscale x 4 x i1>, i64)
 
@@ -1102,308 +215,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv4i32.nxv8i64(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv8i64(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 8 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i32_nxv8i64(<vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i32.nxv8i64(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i32_nxv8i64(<vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv8i64(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4i32.nxv1i8(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv1i8(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 1 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i32_nxv1i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i32.nxv1i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i32_nxv1i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv1i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4i32.nxv2i8(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv2i8(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 2 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i32_nxv2i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i32.nxv2i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i32_nxv2i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv2i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4i32.nxv8i32(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv8i32(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 8 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i32_nxv8i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i32.nxv8i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i32_nxv8i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv8i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4i32.nxv32i8(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv32i8(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 32 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i32_nxv32i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i32.nxv32i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i32_nxv32i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv32i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4i32.nxv16i32(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv16i32(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 16 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i32_nxv16i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i32.nxv16i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i32_nxv16i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv16i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4i32.nxv2i16(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv2i16(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 2 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i32_nxv2i16(<vscale x 4 x i32> %val, i32* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i32.nxv2i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i32_nxv2i16(<vscale x 4 x i32> %val, i32* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv2i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4i32.nxv2i64(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv2i64(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 2 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i32_nxv2i64(<vscale x 4 x i32> %val, i32* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v26, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v26
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i32.nxv2i64(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i32_nxv2i64(<vscale x 4 x i32> %val, i32* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v26, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v26, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv2i64(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4i32.nxv16i16(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv16i16(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 16 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4i32_nxv16i16(<vscale x 4 x i32> %val, i32* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv16i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4i32_nxv16i16(<vscale x 4 x i32> %val, i32* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v2, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv16i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4i32.nxv32i16(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv32i16(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 32 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4i32_nxv32i16(<vscale x 4 x i32> %val, i32* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv32i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4i32_nxv32i16(<vscale x 4 x i32> %val, i32* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv32i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i32>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i32>, <vscale x 4 x i1>, i64)
 
@@ -1435,130 +246,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv4i32.nxv16i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv16i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 16 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4i32_nxv16i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv16i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4i32_nxv16i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv16i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4i32.nxv1i64(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv1i64(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 1 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4i32_nxv1i64(<vscale x 4 x i32> %val, i32* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv1i64(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4i32_nxv1i64(<vscale x 4 x i32> %val, i32* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv1i64(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4i32.nxv1i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv1i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 1 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4i32_nxv1i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv1i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4i32_nxv1i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv1i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4i32.nxv8i16(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv8i16(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 8 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4i32_nxv8i16(<vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv8i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4i32_nxv8i16(<vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv8i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i8>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i8>, <vscale x 4 x i1>, i64)
 
@@ -1590,99 +277,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv4i32.nxv1i16(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv1i16(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 1 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4i32_nxv1i16(<vscale x 4 x i32> %val, i32* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv1i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4i32_nxv1i16(<vscale x 4 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv1i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4i32.nxv2i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv2i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 2 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4i32_nxv2i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv2i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4i32_nxv2i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv2i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4i32.nxv8i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv8i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 8 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4i32_nxv8i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv8i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4i32_nxv8i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv8i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i64(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i64>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i64(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i64>, <vscale x 4 x i1>, i64)
 
@@ -1714,37 +308,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv4i32.nxv64i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv64i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 64 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4i32_nxv64i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv64i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4i32_nxv64i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv64i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i16(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i16>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i16(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i16>, <vscale x 4 x i1>, i64)
 
@@ -1776,320 +339,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv4i32.nxv8i64(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv8i64(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 8 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4i32_nxv8i64(<vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv8i64(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4i32_nxv8i64(<vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv8i64(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4i32.nxv1i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv1i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 1 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4i32_nxv1i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv1i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4i32_nxv1i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv1i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4i32.nxv2i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv2i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 2 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4i32_nxv2i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv2i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4i32_nxv2i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv2i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4i32.nxv8i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv8i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 8 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4i32_nxv8i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv8i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4i32_nxv8i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v2, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv8i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4i32.nxv32i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv32i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 32 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4i32_nxv32i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv32i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4i32_nxv32i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v2, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv32i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4i32.nxv16i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv16i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 16 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4i32_nxv16i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv16i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4i32_nxv16i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv16i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4i32.nxv2i16(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv2i16(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 2 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4i32_nxv2i16(<vscale x 4 x i32> %val, i32* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv2i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4i32_nxv2i16(<vscale x 4 x i32> %val, i32* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv2i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4i32.nxv2i64(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv2i64(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 2 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4i32_nxv2i64(<vscale x 4 x i32> %val, i32* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv2i64(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4i32_nxv2i64(<vscale x 4 x i32> %val, i32* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv2i64(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4i32.nxv16i16(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv16i16(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 16 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4i32_nxv16i16(<vscale x 4 x i32> %val, i32* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4i32.nxv16i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4i32_nxv16i16(<vscale x 4 x i32> %val, i32* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v2, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv16i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4i32.nxv32i16(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv32i16(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 32 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4i32_nxv32i16(<vscale x 4 x i32> %val, i32* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4i32.nxv32i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4i32_nxv32i16(<vscale x 4 x i32> %val, i32* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv32i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i32>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i32>, <vscale x 4 x i1>, i64)
 
@@ -2123,138 +372,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv4i32.nxv16i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv16i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 16 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4i32_nxv16i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4i32.nxv16i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4i32_nxv16i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv16i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4i32.nxv1i64(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv1i64(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 1 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4i32_nxv1i64(<vscale x 4 x i32> %val, i32* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4i32.nxv1i64(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4i32_nxv1i64(<vscale x 4 x i32> %val, i32* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv1i64(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4i32.nxv1i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv1i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 1 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4i32_nxv1i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4i32.nxv1i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4i32_nxv1i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv1i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4i32.nxv8i16(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv8i16(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 8 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4i32_nxv8i16(<vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4i32.nxv8i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4i32_nxv8i16(<vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv8i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i8>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i8>, <vscale x 4 x i1>, i64)
 
@@ -2288,105 +405,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv4i32.nxv1i16(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv1i16(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 1 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4i32_nxv1i16(<vscale x 4 x i32> %val, i32* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4i32.nxv1i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4i32_nxv1i16(<vscale x 4 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv1i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4i32.nxv2i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv2i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 2 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4i32_nxv2i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4i32.nxv2i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4i32_nxv2i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv2i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4i32.nxv8i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv8i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 8 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4i32_nxv8i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4i32.nxv8i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4i32_nxv8i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv8i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i64(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i64>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i64(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i64>, <vscale x 4 x i1>, i64)
 
@@ -2420,39 +438,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv4i32.nxv64i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv64i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 64 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4i32_nxv64i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4i32.nxv64i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4i32_nxv64i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv64i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i16(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i16>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i16(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i16>, <vscale x 4 x i1>, i64)
 
@@ -2486,270 +471,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv4i32.nxv8i64(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv8i64(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 8 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4i32_nxv8i64(<vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4i32.nxv8i64(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4i32_nxv8i64(<vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv8i64(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4i32.nxv1i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv1i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 1 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4i32_nxv1i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4i32.nxv1i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4i32_nxv1i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv1i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4i32.nxv2i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv2i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 2 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4i32_nxv2i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4i32.nxv2i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4i32_nxv2i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv2i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4i32.nxv8i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv8i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 8 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4i32_nxv8i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4i32.nxv8i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4i32_nxv8i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v2, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv8i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4i32.nxv32i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv32i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 32 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4i32_nxv32i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4i32.nxv32i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4i32_nxv32i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v2, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv32i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4i32.nxv16i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv16i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 16 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4i32_nxv16i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4i32.nxv16i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4i32_nxv16i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv16i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4i32.nxv2i16(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv2i16(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 2 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4i32_nxv2i16(<vscale x 4 x i32> %val, i32* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4i32.nxv2i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4i32_nxv2i16(<vscale x 4 x i32> %val, i32* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv2i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4i32.nxv2i64(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv2i64(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 2 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4i32_nxv2i64(<vscale x 4 x i32> %val, i32* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4i32.nxv2i64(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4i32_nxv2i64(<vscale x 4 x i32> %val, i32* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv2i64(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i16(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i16>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i16(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i16>, <vscale x 16 x i1>, i64)
 
@@ -2779,66 +500,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv16i8.nxv32i16(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv32i16(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 32 x i16>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg2_nxv16i8_nxv32i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16i8.nxv32i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv16i8_nxv32i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv32i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv16i8.nxv4i32(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv4i32(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 4 x i32>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg2_nxv16i8_nxv4i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v26, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v26
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16i8.nxv4i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv16i8_nxv4i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v26, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v26, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv4i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i8>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i8>, <vscale x 16 x i1>, i64)
 
@@ -2870,461 +531,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv16i8.nxv1i64(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv1i64(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 1 x i64>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg2_nxv16i8_nxv1i64(<vscale x 16 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16i8.nxv1i64(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv16i8_nxv1i64(<vscale x 16 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv1i64(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv16i8.nxv1i32(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv1i32(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 1 x i32>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg2_nxv16i8_nxv1i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16i8.nxv1i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv16i8_nxv1i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv1i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv16i8.nxv8i16(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv8i16(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 8 x i16>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg2_nxv16i8_nxv8i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v26, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v26
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16i8.nxv8i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv16i8_nxv8i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v26, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v26, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv8i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv16i8.nxv4i8(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv4i8(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 4 x i8>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg2_nxv16i8_nxv4i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16i8.nxv4i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv16i8_nxv4i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv4i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv16i8.nxv1i16(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv1i16(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 1 x i16>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg2_nxv16i8_nxv1i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16i8.nxv1i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv16i8_nxv1i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv1i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv16i8.nxv2i32(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv2i32(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 2 x i32>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg2_nxv16i8_nxv2i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16i8.nxv2i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv16i8_nxv2i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv2i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv16i8.nxv8i8(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv8i8(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 8 x i8>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg2_nxv16i8_nxv8i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16i8.nxv8i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv16i8_nxv8i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv8i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv16i8.nxv4i64(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv4i64(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 4 x i64>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg2_nxv16i8_nxv4i64(<vscale x 16 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16i8.nxv4i64(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv16i8_nxv4i64(<vscale x 16 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv4i64(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv16i8.nxv64i8(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv64i8(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 64 x i8>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg2_nxv16i8_nxv64i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16i8.nxv64i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv16i8_nxv64i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv64i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv16i8.nxv4i16(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv4i16(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 4 x i16>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg2_nxv16i8_nxv4i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16i8.nxv4i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv16i8_nxv4i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv4i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv16i8.nxv8i64(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv8i64(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 8 x i64>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg2_nxv16i8_nxv8i64(<vscale x 16 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16i8.nxv8i64(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv16i8_nxv8i64(<vscale x 16 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv8i64(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv16i8.nxv1i8(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv1i8(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 1 x i8>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg2_nxv16i8_nxv1i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16i8.nxv1i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv16i8_nxv1i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv1i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv16i8.nxv2i8(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv2i8(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 2 x i8>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg2_nxv16i8_nxv2i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16i8.nxv2i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv16i8_nxv2i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv2i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv16i8.nxv8i32(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv8i32(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 8 x i32>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg2_nxv16i8_nxv8i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16i8.nxv8i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv16i8_nxv8i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv8i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv16i8.nxv32i8(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv32i8(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 32 x i8>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg2_nxv16i8_nxv32i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16i8.nxv32i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv16i8_nxv32i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv32i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i32(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i32>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i32(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i32>, <vscale x 16 x i1>, i64)
 
@@ -3354,68 +560,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv16i8.nxv2i16(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv2i16(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 2 x i16>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg2_nxv16i8_nxv2i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16i8.nxv2i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv16i8_nxv2i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv2i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv16i8.nxv2i64(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv2i64(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 2 x i64>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg2_nxv16i8_nxv2i64(<vscale x 16 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v26, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v26
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16i8.nxv2i64(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv16i8_nxv2i64(<vscale x 16 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v26, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v26, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv2i64(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i16(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i16>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i16(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i16>, <vscale x 16 x i1>, i64)
 
@@ -3447,68 +591,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv16i8.nxv32i16(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv32i16(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 32 x i16>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg3_nxv16i8_nxv32i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv32i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv16i8_nxv32i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv32i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv16i8.nxv4i32(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv4i32(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 4 x i32>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg3_nxv16i8_nxv4i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv4i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv16i8_nxv4i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv4i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i8>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i8>, <vscale x 16 x i1>, i64)
 
@@ -3540,471 +622,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv16i8.nxv1i64(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv1i64(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 1 x i64>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg3_nxv16i8_nxv1i64(<vscale x 16 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv1i64(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv16i8_nxv1i64(<vscale x 16 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv1i64(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv16i8.nxv1i32(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv1i32(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 1 x i32>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg3_nxv16i8_nxv1i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv1i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv16i8_nxv1i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv1i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv16i8.nxv8i16(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv8i16(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 8 x i16>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg3_nxv16i8_nxv8i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv8i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv16i8_nxv8i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv8i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv16i8.nxv4i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv4i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 4 x i8>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg3_nxv16i8_nxv4i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv4i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv16i8_nxv4i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv4i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv16i8.nxv1i16(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv1i16(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 1 x i16>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg3_nxv16i8_nxv1i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv1i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv16i8_nxv1i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv1i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv16i8.nxv2i32(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv2i32(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 2 x i32>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg3_nxv16i8_nxv2i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv2i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv16i8_nxv2i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv2i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv16i8.nxv8i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv8i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 8 x i8>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg3_nxv16i8_nxv8i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv8i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv16i8_nxv8i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv8i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv16i8.nxv4i64(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv4i64(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 4 x i64>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg3_nxv16i8_nxv4i64(<vscale x 16 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv4i64(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv16i8_nxv4i64(<vscale x 16 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v2, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv4i64(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv16i8.nxv64i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv64i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 64 x i8>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg3_nxv16i8_nxv64i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv64i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv16i8_nxv64i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv64i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv16i8.nxv4i16(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv4i16(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 4 x i16>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg3_nxv16i8_nxv4i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv4i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv16i8_nxv4i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv4i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv16i8.nxv8i64(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv8i64(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 8 x i64>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg3_nxv16i8_nxv8i64(<vscale x 16 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv8i64(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv16i8_nxv8i64(<vscale x 16 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv8i64(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv16i8.nxv1i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv1i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 1 x i8>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg3_nxv16i8_nxv1i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv1i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv16i8_nxv1i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv1i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv16i8.nxv2i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv2i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 2 x i8>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg3_nxv16i8_nxv2i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv2i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv16i8_nxv2i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv2i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv16i8.nxv8i32(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv8i32(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 8 x i32>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg3_nxv16i8_nxv8i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv8i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv16i8_nxv8i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v2, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv8i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv16i8.nxv32i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv32i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 32 x i8>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg3_nxv16i8_nxv32i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv32i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv16i8_nxv32i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v2, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv32i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i32(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i32>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i32(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i32>, <vscale x 16 x i1>, i64)
 
@@ -4036,68 +653,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv16i8.nxv2i16(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv2i16(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 2 x i16>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg3_nxv16i8_nxv2i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv2i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv16i8_nxv2i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv2i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv16i8.nxv2i64(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv2i64(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 2 x i64>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg3_nxv16i8_nxv2i64(<vscale x 16 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv2i64(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv16i8_nxv2i64(<vscale x 16 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv2i64(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i16(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i16>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i16(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i16>, <vscale x 16 x i1>, i64)
 
@@ -4131,72 +686,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv16i8.nxv32i16(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv32i16(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 32 x i16>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg4_nxv16i8_nxv32i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv16i8.nxv32i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv16i8_nxv32i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv32i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv16i8.nxv4i32(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv4i32(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 4 x i32>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg4_nxv16i8_nxv4i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv16i8.nxv4i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv16i8_nxv4i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv4i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i8>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i8>, <vscale x 16 x i1>, i64)
 
@@ -4230,501 +719,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv16i8.nxv1i64(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv1i64(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 1 x i64>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg4_nxv16i8_nxv1i64(<vscale x 16 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv16i8.nxv1i64(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv16i8_nxv1i64(<vscale x 16 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv1i64(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv16i8.nxv1i32(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv1i32(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 1 x i32>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg4_nxv16i8_nxv1i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv16i8.nxv1i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv16i8_nxv1i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv1i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv16i8.nxv8i16(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv8i16(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 8 x i16>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg4_nxv16i8_nxv8i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv16i8.nxv8i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv16i8_nxv8i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv8i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv16i8.nxv4i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv4i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 4 x i8>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg4_nxv16i8_nxv4i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv16i8.nxv4i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv16i8_nxv4i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv4i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv16i8.nxv1i16(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv1i16(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 1 x i16>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg4_nxv16i8_nxv1i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv16i8.nxv1i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv16i8_nxv1i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv1i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv16i8.nxv2i32(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv2i32(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 2 x i32>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg4_nxv16i8_nxv2i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv16i8.nxv2i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv16i8_nxv2i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv2i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv16i8.nxv8i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv8i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 8 x i8>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg4_nxv16i8_nxv8i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv16i8.nxv8i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv16i8_nxv8i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv8i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv16i8.nxv4i64(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv4i64(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 4 x i64>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg4_nxv16i8_nxv4i64(<vscale x 16 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv16i8.nxv4i64(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv16i8_nxv4i64(<vscale x 16 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v2, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv4i64(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv16i8.nxv64i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv64i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 64 x i8>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg4_nxv16i8_nxv64i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv16i8.nxv64i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv16i8_nxv64i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv64i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv16i8.nxv4i16(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv4i16(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 4 x i16>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg4_nxv16i8_nxv4i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv16i8.nxv4i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv16i8_nxv4i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv4i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv16i8.nxv8i64(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv8i64(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 8 x i64>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg4_nxv16i8_nxv8i64(<vscale x 16 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv16i8.nxv8i64(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv16i8_nxv8i64(<vscale x 16 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv8i64(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv16i8.nxv1i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv1i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 1 x i8>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg4_nxv16i8_nxv1i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv16i8.nxv1i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv16i8_nxv1i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv1i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv16i8.nxv2i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv2i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 2 x i8>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg4_nxv16i8_nxv2i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv16i8.nxv2i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv16i8_nxv2i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv2i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv16i8.nxv8i32(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv8i32(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 8 x i32>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg4_nxv16i8_nxv8i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv16i8.nxv8i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv16i8_nxv8i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v2, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv8i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv16i8.nxv32i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv32i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 32 x i8>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg4_nxv16i8_nxv32i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv16i8.nxv32i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv16i8_nxv32i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v2, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv32i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i32(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i32>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i32(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i32>, <vscale x 16 x i1>, i64)
 
@@ -4758,188 +752,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv16i8.nxv2i16(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv2i16(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 2 x i16>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg4_nxv16i8_nxv2i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv16i8.nxv2i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv16i8_nxv2i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv2i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv16i8.nxv2i64(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv2i64(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 2 x i64>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg4_nxv16i8_nxv2i64(<vscale x 16 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv16i8.nxv2i64(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv16i8_nxv2i64(<vscale x 16 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv2i64(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i64.nxv16i16(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv16i16(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i64_nxv16i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i64.nxv16i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i64_nxv16i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv16i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i64.nxv32i16(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv32i16(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i64_nxv32i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i64.nxv32i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i64_nxv32i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv32i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i64.nxv4i32(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv4i32(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i64_nxv4i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i64.nxv4i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i64_nxv4i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv4i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i64.nxv16i8(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv16i8(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i64_nxv16i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i64.nxv16i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i64_nxv16i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv16i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i64>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
 
@@ -5002,66 +814,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv1i64.nxv8i16(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv8i16(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i64_nxv8i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i64.nxv8i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i64_nxv8i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv8i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i64.nxv4i8(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv4i8(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i64_nxv4i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i64.nxv4i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i64_nxv4i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv4i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i16(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i16>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i16(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
 
@@ -5093,186 +845,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv1i64.nxv2i32(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv2i32(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i64_nxv2i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i64.nxv2i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i64_nxv2i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv2i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i64.nxv8i8(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv8i8(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i64_nxv8i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i64.nxv8i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i64_nxv8i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv8i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i64.nxv4i64(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv4i64(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i64_nxv4i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i64.nxv4i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i64_nxv4i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv4i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i64.nxv64i8(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv64i8(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 64 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i64_nxv64i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i64.nxv64i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i64_nxv64i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv64i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i64.nxv4i16(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv4i16(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i64_nxv4i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i64.nxv4i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i64_nxv4i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv4i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i64.nxv8i64(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv8i64(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i64_nxv8i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i64.nxv8i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i64_nxv8i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv8i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i8(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i8>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i8(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)
 
@@ -5304,308 +876,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv1i64.nxv2i8(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv2i8(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i64_nxv2i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i64.nxv2i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i64_nxv2i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv2i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i64.nxv8i32(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv8i32(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i64_nxv8i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i64.nxv8i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i64_nxv8i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv8i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i64.nxv32i8(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv32i8(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i64_nxv32i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i64.nxv32i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i64_nxv32i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv32i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i64.nxv16i32(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv16i32(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i64_nxv16i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i64.nxv16i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i64_nxv16i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv16i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i64.nxv2i16(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv2i16(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i64_nxv2i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i64.nxv2i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i64_nxv2i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv2i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i64.nxv2i64(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv2i64(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i64_nxv2i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i64.nxv2i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i64_nxv2i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv2i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i64.nxv16i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv16i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i64_nxv16i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i64.nxv16i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i64_nxv16i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv16i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i64.nxv32i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv32i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i64_nxv32i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i64.nxv32i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i64_nxv32i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv32i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i64.nxv4i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv4i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i64_nxv4i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i64.nxv4i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i64_nxv4i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv4i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i64.nxv16i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv16i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i64_nxv16i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i64.nxv16i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i64_nxv16i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv16i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i64>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
 
@@ -5668,68 +938,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv1i64.nxv8i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv8i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i64_nxv8i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i64.nxv8i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i64_nxv8i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv8i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i64.nxv4i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv4i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i64_nxv4i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i64.nxv4i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i64_nxv4i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv4i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i16>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
 
@@ -5761,192 +969,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv1i64.nxv2i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv2i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i64_nxv2i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i64.nxv2i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i64_nxv2i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv2i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i64.nxv8i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv8i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i64_nxv8i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i64.nxv8i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i64_nxv8i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv8i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i64.nxv4i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv4i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i64_nxv4i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i64.nxv4i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i64_nxv4i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv4i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i64.nxv64i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv64i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 64 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i64_nxv64i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i64.nxv64i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i64_nxv64i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv64i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i64.nxv4i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv4i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i64_nxv4i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i64.nxv4i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i64_nxv4i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv4i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i64.nxv8i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv8i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i64_nxv8i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i64.nxv8i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i64_nxv8i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv8i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i8>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)
 
@@ -5978,324 +1000,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv1i64.nxv2i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv2i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i64_nxv2i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i64.nxv2i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i64_nxv2i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv2i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i64.nxv8i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv8i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i64_nxv8i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i64.nxv8i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i64_nxv8i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv8i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i64.nxv32i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv32i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i64_nxv32i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i64.nxv32i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i64_nxv32i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv32i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i64.nxv16i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv16i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i64_nxv16i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i64.nxv16i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i64_nxv16i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv16i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i64.nxv2i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv2i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i64_nxv2i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i64.nxv2i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i64_nxv2i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv2i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i64.nxv2i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv2i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i64_nxv2i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i64.nxv2i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i64_nxv2i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv2i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i64.nxv16i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv16i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i64_nxv16i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i64.nxv16i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i64_nxv16i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv16i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i64.nxv32i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv32i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i64_nxv32i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i64.nxv32i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i64_nxv32i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv32i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i64.nxv4i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv4i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i64_nxv4i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i64.nxv4i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i64_nxv4i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv4i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i64.nxv16i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv16i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i64_nxv16i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i64.nxv16i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i64_nxv16i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv16i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i64>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
 
@@ -6362,72 +1066,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv1i64.nxv8i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv8i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i64_nxv8i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i64.nxv8i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i64_nxv8i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv8i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i64.nxv4i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv4i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i64_nxv4i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i64.nxv4i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i64_nxv4i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv4i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i16>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
 
@@ -6461,204 +1099,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv1i64.nxv2i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv2i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i64_nxv2i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i64.nxv2i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i64_nxv2i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv2i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i64.nxv8i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv8i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i64_nxv8i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i64.nxv8i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i64_nxv8i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv8i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i64.nxv4i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv4i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i64_nxv4i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i64.nxv4i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i64_nxv4i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv4i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i64.nxv64i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv64i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 64 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i64_nxv64i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i64.nxv64i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i64_nxv64i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv64i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i64.nxv4i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv4i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i64_nxv4i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i64.nxv4i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i64_nxv4i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv4i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i64.nxv8i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv8i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i64_nxv8i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i64.nxv8i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i64_nxv8i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv8i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i8>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)
 
@@ -6692,344 +1132,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv1i64.nxv2i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv2i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i64_nxv2i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i64.nxv2i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i64_nxv2i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv2i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i64.nxv8i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv8i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i64_nxv8i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i64.nxv8i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i64_nxv8i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv8i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i64.nxv32i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv32i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i64_nxv32i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i64.nxv32i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i64_nxv32i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv32i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i64.nxv16i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv16i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i64_nxv16i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i64.nxv16i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i64_nxv16i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv16i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i64.nxv2i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv2i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i64_nxv2i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i64.nxv2i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i64_nxv2i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv2i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i64.nxv2i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv2i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i64_nxv2i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i64.nxv2i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i64_nxv2i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv2i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i64.nxv16i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv16i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i64_nxv16i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i64.nxv16i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i64_nxv16i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv16i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i64.nxv32i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv32i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i64_nxv32i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i64.nxv32i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i64_nxv32i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv32i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i64.nxv4i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv4i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i64_nxv4i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i64.nxv4i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i64_nxv4i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv4i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i64.nxv16i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv16i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i64_nxv16i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i64.nxv16i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i64_nxv16i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv16i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i64>, i64)
 declare void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
 
@@ -7100,76 +1202,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg5.nxv1i64.nxv8i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv8i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i64_nxv8i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i64.nxv8i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i64_nxv8i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv8i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i64.nxv4i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv4i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i64_nxv4i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i64.nxv4i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i64_nxv4i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv4i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i16>, i64)
 declare void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
 
@@ -7205,216 +1237,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg5.nxv1i64.nxv2i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv2i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i64_nxv2i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i64.nxv2i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i64_nxv2i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv2i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i64.nxv8i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv8i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i64_nxv8i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i64.nxv8i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i64_nxv8i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv8i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i64.nxv4i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv4i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i64_nxv4i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i64.nxv4i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i64_nxv4i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv4i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i64.nxv64i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv64i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 64 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i64_nxv64i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i64.nxv64i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i64_nxv64i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv64i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i64.nxv4i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv4i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i64_nxv4i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i64.nxv4i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i64_nxv4i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv4i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i64.nxv8i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv8i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i64_nxv8i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i64.nxv8i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i64_nxv8i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv8i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i8>, i64)
 declare void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)
 
@@ -7450,364 +1272,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg5.nxv1i64.nxv2i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv2i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i64_nxv2i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i64.nxv2i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i64_nxv2i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv2i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i64.nxv8i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv8i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i64_nxv8i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i64.nxv8i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i64_nxv8i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv8i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i64.nxv32i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv32i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i64_nxv32i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i64.nxv32i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i64_nxv32i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv32i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i64.nxv16i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv16i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i64_nxv16i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i64.nxv16i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i64_nxv16i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv16i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i64.nxv2i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv2i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i64_nxv2i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i64.nxv2i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i64_nxv2i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv2i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i64.nxv2i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv2i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i64_nxv2i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i64.nxv2i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i64_nxv2i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv2i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i64.nxv16i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv16i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i64_nxv16i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i64.nxv16i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i64_nxv16i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv16i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i64.nxv32i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv32i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i64_nxv32i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i64.nxv32i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i64_nxv32i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv32i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i64.nxv4i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv4i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i64_nxv4i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i64.nxv4i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i64_nxv4i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv4i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i64.nxv16i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv16i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i64_nxv16i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i64.nxv16i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i64_nxv16i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv16i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i64>, i64)
 declare void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
 
@@ -7882,80 +1346,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg6.nxv1i64.nxv8i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv8i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i64_nxv8i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i64.nxv8i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i64_nxv8i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv8i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i64.nxv4i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv4i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i64_nxv4i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i64.nxv4i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i64_nxv4i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv4i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i16>, i64)
 declare void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
 
@@ -7993,228 +1383,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg6.nxv1i64.nxv2i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv2i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i64_nxv2i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i64.nxv2i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i64_nxv2i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv2i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i64.nxv8i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv8i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i64_nxv8i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i64.nxv8i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i64_nxv8i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv8i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i64.nxv4i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv4i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i64_nxv4i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i64.nxv4i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i64_nxv4i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv4i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i64.nxv64i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv64i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 64 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i64_nxv64i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i64.nxv64i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i64_nxv64i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv64i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i64.nxv4i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv4i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i64_nxv4i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i64.nxv4i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i64_nxv4i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv4i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i64.nxv8i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv8i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i64_nxv8i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i64.nxv8i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i64_nxv8i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv8i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i8>, i64)
 declare void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)
 
@@ -8252,384 +1420,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg6.nxv1i64.nxv2i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv2i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i64_nxv2i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i64.nxv2i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i64_nxv2i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv2i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i64.nxv8i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv8i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i64_nxv8i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i64.nxv8i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i64_nxv8i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv8i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i64.nxv32i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv32i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i64_nxv32i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i64.nxv32i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i64_nxv32i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv32i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i64.nxv16i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv16i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i64_nxv16i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i64.nxv16i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i64_nxv16i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv16i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i64.nxv2i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv2i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i64_nxv2i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i64.nxv2i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i64_nxv2i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv2i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i64.nxv2i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv2i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i64_nxv2i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i64.nxv2i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i64_nxv2i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv2i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i64.nxv16i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv16i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i64_nxv16i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i64.nxv16i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i64_nxv16i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv16i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i64.nxv32i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv32i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i64_nxv32i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i64.nxv32i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i64_nxv32i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv32i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i64.nxv4i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv4i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i64_nxv4i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i64.nxv4i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i64_nxv4i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv4i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i64.nxv16i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv16i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i64_nxv16i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i64.nxv16i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i64_nxv16i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv16i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i64>, i64)
 declare void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
 
@@ -8708,84 +1498,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg7.nxv1i64.nxv8i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv8i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i64_nxv8i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i64.nxv8i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i64_nxv8i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv8i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i64.nxv4i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv4i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i64_nxv4i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i64.nxv4i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i64_nxv4i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv4i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i16>, i64)
 declare void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
 
@@ -8825,240 +1537,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg7.nxv1i64.nxv2i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv2i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i64_nxv2i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i64.nxv2i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i64_nxv2i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv2i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i64.nxv8i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv8i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i64_nxv8i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i64.nxv8i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i64_nxv8i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv8i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i64.nxv4i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv4i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i64_nxv4i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i64.nxv4i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i64_nxv4i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv4i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i64.nxv64i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv64i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 64 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i64_nxv64i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i64.nxv64i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i64_nxv64i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv64i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i64.nxv4i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv4i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i64_nxv4i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i64.nxv4i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i64_nxv4i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv4i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i64.nxv8i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv8i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i64_nxv8i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i64.nxv8i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i64_nxv8i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv8i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i8>, i64)
 declare void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)
 
@@ -9098,404 +1576,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg7.nxv1i64.nxv2i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv2i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i64_nxv2i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i64.nxv2i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i64_nxv2i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv2i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i64.nxv8i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv8i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i64_nxv8i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i64.nxv8i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i64_nxv8i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv8i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i64.nxv32i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv32i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i64_nxv32i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i64.nxv32i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i64_nxv32i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv32i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i64.nxv16i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv16i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i64_nxv16i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i64.nxv16i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i64_nxv16i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv16i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i64.nxv2i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv2i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i64_nxv2i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i64.nxv2i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i64_nxv2i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv2i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i64.nxv2i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv2i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i64_nxv2i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i64.nxv2i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i64_nxv2i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv2i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i64.nxv16i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv16i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i64_nxv16i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i64.nxv16i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i64_nxv16i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv16i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i64.nxv32i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv32i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i64_nxv32i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i64.nxv32i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i64_nxv32i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv32i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i64.nxv4i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv4i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i64_nxv4i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i64.nxv4i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i64_nxv4i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv4i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i64.nxv16i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv16i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i64_nxv16i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i64.nxv16i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i64_nxv16i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv16i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i64>, i64)
 declare void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
 
@@ -9578,88 +1658,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg8.nxv1i64.nxv8i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv8i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i64_nxv8i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i64.nxv8i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i64_nxv8i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv8i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i64.nxv4i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv4i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i64_nxv4i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i64.nxv4i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i64_nxv4i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv4i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i16>, i64)
 declare void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
 
@@ -9701,252 +1699,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg8.nxv1i64.nxv2i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv2i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i64_nxv2i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i64.nxv2i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i64_nxv2i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv2i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i64.nxv8i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv8i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i64_nxv8i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i64.nxv8i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i64_nxv8i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv8i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i64.nxv4i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv4i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i64_nxv4i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i64.nxv4i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i64_nxv4i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv4i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i64.nxv64i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv64i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 64 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i64_nxv64i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i64.nxv64i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i64_nxv64i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv64i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i64.nxv4i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv4i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 4 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i64_nxv4i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i64.nxv4i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i64_nxv4i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv4i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i64.nxv8i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv8i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i64_nxv8i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i64.nxv8i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i64_nxv8i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv8i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i8>, i64)
 declare void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)
 
@@ -9988,368 +1740,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg8.nxv1i64.nxv2i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv2i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i64_nxv2i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i64.nxv2i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i64_nxv2i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv2i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i64.nxv8i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv8i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i64_nxv8i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i64.nxv8i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i64_nxv8i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv8i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i64.nxv32i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv32i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i64_nxv32i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i64.nxv32i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i64_nxv32i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv32i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i64.nxv16i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv16i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i64_nxv16i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i64.nxv16i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i64_nxv16i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv16i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i64.nxv2i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv2i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i64_nxv2i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i64.nxv2i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i64_nxv2i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv2i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i64.nxv2i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv2i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i64_nxv2i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i64.nxv2i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i64_nxv2i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv2i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i32.nxv16i16(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv16i16(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i32_nxv16i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i32.nxv16i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i32_nxv16i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv16i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i32.nxv32i16(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv32i16(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i32_nxv32i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i32.nxv32i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i32_nxv32i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv32i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i32.nxv4i32(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv4i32(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i32_nxv4i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i32.nxv4i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i32_nxv4i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv4i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i32.nxv16i8(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv16i8(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i32_nxv16i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i32.nxv16i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i32_nxv16i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv16i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i64(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i64>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i64(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
 
@@ -10412,66 +1802,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv1i32.nxv8i16(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv8i16(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i32_nxv8i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i32.nxv8i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i32_nxv8i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv8i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i32.nxv4i8(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv4i8(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i32_nxv4i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i32.nxv4i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i32_nxv4i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv4i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i16>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
 
@@ -10503,186 +1833,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv1i32.nxv2i32(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv2i32(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i32_nxv2i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i32.nxv2i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i32_nxv2i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv2i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i32.nxv8i8(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv8i8(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i32_nxv8i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i32.nxv8i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i32_nxv8i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv8i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i32.nxv4i64(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv4i64(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i32_nxv4i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i32.nxv4i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i32_nxv4i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv4i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i32.nxv64i8(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv64i8(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 64 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i32_nxv64i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i32.nxv64i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i32_nxv64i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv64i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i32.nxv4i16(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv4i16(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i32_nxv4i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i32.nxv4i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i32_nxv4i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv4i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i32.nxv8i64(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv8i64(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i32_nxv8i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i32.nxv8i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i32_nxv8i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv8i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i8>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)
 
@@ -10714,308 +1864,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv1i32.nxv2i8(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv2i8(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i32_nxv2i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i32.nxv2i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i32_nxv2i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv2i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i32.nxv8i32(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv8i32(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i32_nxv8i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i32.nxv8i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i32_nxv8i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv8i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i32.nxv32i8(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv32i8(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i32_nxv32i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i32.nxv32i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i32_nxv32i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv32i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i32.nxv16i32(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv16i32(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i32_nxv16i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i32.nxv16i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i32_nxv16i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv16i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i32.nxv2i16(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv2i16(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i32_nxv2i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i32.nxv2i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i32_nxv2i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv2i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i32.nxv2i64(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv2i64(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i32_nxv2i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i32.nxv2i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i32_nxv2i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv2i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i32.nxv16i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv16i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i32_nxv16i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv16i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i32_nxv16i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv16i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i32.nxv32i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv32i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i32_nxv32i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv32i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i32_nxv32i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv32i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i32.nxv4i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv4i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i32_nxv4i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv4i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i32_nxv4i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv4i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i32.nxv16i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv16i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i32_nxv16i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv16i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i32_nxv16i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv16i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i64>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
 
@@ -11078,68 +1926,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv1i32.nxv8i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv8i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i32_nxv8i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv8i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i32_nxv8i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv8i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i32.nxv4i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv4i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i32_nxv4i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv4i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i32_nxv4i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv4i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i16>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
 
@@ -11171,192 +1957,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv1i32.nxv2i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv2i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i32_nxv2i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv2i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i32_nxv2i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv2i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i32.nxv8i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv8i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i32_nxv8i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv8i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i32_nxv8i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv8i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i32.nxv4i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv4i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i32_nxv4i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv4i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i32_nxv4i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv4i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i32.nxv64i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv64i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 64 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i32_nxv64i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv64i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i32_nxv64i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv64i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i32.nxv4i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv4i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i32_nxv4i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv4i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i32_nxv4i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv4i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i32.nxv8i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv8i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i32_nxv8i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv8i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i32_nxv8i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv8i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i8>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)
 
@@ -11388,324 +1988,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv1i32.nxv2i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv2i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i32_nxv2i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv2i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i32_nxv2i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv2i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i32.nxv8i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv8i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i32_nxv8i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv8i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i32_nxv8i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv8i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i32.nxv32i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv32i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i32_nxv32i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv32i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i32_nxv32i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv32i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i32.nxv16i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv16i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i32_nxv16i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv16i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i32_nxv16i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv16i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i32.nxv2i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv2i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i32_nxv2i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv2i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i32_nxv2i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv2i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i32.nxv2i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv2i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i32_nxv2i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i32.nxv2i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i32_nxv2i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv2i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i32.nxv16i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv16i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i32_nxv16i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i32.nxv16i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i32_nxv16i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv16i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i32.nxv32i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv32i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i32_nxv32i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i32.nxv32i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i32_nxv32i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv32i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i32.nxv4i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv4i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i32_nxv4i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i32.nxv4i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i32_nxv4i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv4i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i32.nxv16i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv16i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i32_nxv16i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i32.nxv16i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i32_nxv16i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv16i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i64>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
 
@@ -11772,72 +2054,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv1i32.nxv8i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv8i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i32_nxv8i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i32.nxv8i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i32_nxv8i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv8i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i32.nxv4i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv4i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i32_nxv4i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i32.nxv4i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i32_nxv4i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv4i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i16>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
 
@@ -11871,204 +2087,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv1i32.nxv2i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv2i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i32_nxv2i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i32.nxv2i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i32_nxv2i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv2i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i32.nxv8i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv8i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i32_nxv8i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i32.nxv8i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i32_nxv8i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv8i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i32.nxv4i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv4i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i32_nxv4i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i32.nxv4i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i32_nxv4i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv4i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i32.nxv64i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv64i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 64 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i32_nxv64i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i32.nxv64i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i32_nxv64i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv64i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i32.nxv4i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv4i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i32_nxv4i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i32.nxv4i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i32_nxv4i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv4i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i32.nxv8i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv8i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i32_nxv8i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i32.nxv8i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i32_nxv8i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv8i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i8>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)
 
@@ -12102,344 +2120,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv1i32.nxv2i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv2i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i32_nxv2i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i32.nxv2i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i32_nxv2i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv2i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i32.nxv8i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv8i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i32_nxv8i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i32.nxv8i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i32_nxv8i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv8i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i32.nxv32i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv32i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i32_nxv32i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i32.nxv32i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i32_nxv32i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv32i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i32.nxv16i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv16i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i32_nxv16i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i32.nxv16i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i32_nxv16i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv16i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i32.nxv2i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv2i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i32_nxv2i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i32.nxv2i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i32_nxv2i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv2i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i32.nxv2i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv2i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i32_nxv2i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i32.nxv2i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i32_nxv2i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv2i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i32.nxv16i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv16i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i32_nxv16i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv16i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i32_nxv16i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv16i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i32.nxv32i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv32i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i32_nxv32i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv32i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i32_nxv32i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv32i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i32.nxv4i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv4i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i32_nxv4i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv4i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i32_nxv4i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv4i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i32.nxv16i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv16i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i32_nxv16i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv16i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i32_nxv16i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv16i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i64>, i64)
 declare void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
 
@@ -12510,76 +2190,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg5.nxv1i32.nxv8i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv8i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i32_nxv8i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv8i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i32_nxv8i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv8i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i32.nxv4i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv4i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i32_nxv4i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv4i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i32_nxv4i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv4i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i16>, i64)
 declare void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
 
@@ -12615,216 +2225,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg5.nxv1i32.nxv2i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv2i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i32_nxv2i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv2i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i32_nxv2i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv2i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i32.nxv8i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv8i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i32_nxv8i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv8i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i32_nxv8i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv8i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i32.nxv4i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv4i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i32_nxv4i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv4i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i32_nxv4i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv4i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i32.nxv64i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv64i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 64 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i32_nxv64i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv64i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i32_nxv64i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv64i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i32.nxv4i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv4i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i32_nxv4i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv4i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i32_nxv4i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv4i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i32.nxv8i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv8i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i32_nxv8i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv8i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i32_nxv8i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv8i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i8>, i64)
 declare void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)
 
@@ -12860,364 +2260,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg5.nxv1i32.nxv2i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv2i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i32_nxv2i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv2i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i32_nxv2i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv2i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i32.nxv8i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv8i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i32_nxv8i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv8i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i32_nxv8i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv8i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i32.nxv32i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv32i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i32_nxv32i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv32i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i32_nxv32i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv32i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i32.nxv16i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv16i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i32_nxv16i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv16i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i32_nxv16i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv16i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i32.nxv2i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv2i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i32_nxv2i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv2i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i32_nxv2i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv2i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i32.nxv2i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv2i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i32_nxv2i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i32.nxv2i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i32_nxv2i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv2i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i32.nxv16i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv16i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i32_nxv16i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv16i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i32_nxv16i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv16i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i32.nxv32i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv32i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i32_nxv32i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv32i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i32_nxv32i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv32i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i32.nxv4i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv4i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i32_nxv4i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv4i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i32_nxv4i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv4i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i32.nxv16i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv16i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i32_nxv16i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv16i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i32_nxv16i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv16i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i64>, i64)
 declare void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
 
@@ -13292,80 +2334,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg6.nxv1i32.nxv8i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv8i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i32_nxv8i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv8i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i32_nxv8i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv8i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i32.nxv4i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv4i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i32_nxv4i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv4i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i32_nxv4i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv4i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i16>, i64)
 declare void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
 
@@ -13403,228 +2371,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg6.nxv1i32.nxv2i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv2i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i32_nxv2i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv2i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i32_nxv2i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv2i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i32.nxv8i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv8i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i32_nxv8i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv8i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i32_nxv8i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv8i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i32.nxv4i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv4i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i32_nxv4i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv4i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i32_nxv4i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv4i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i32.nxv64i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv64i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 64 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i32_nxv64i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv64i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i32_nxv64i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv64i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i32.nxv4i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv4i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i32_nxv4i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv4i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i32_nxv4i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv4i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i32.nxv8i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv8i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i32_nxv8i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv8i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i32_nxv8i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv8i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i8>, i64)
 declare void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)
 
@@ -13662,384 +2408,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg6.nxv1i32.nxv2i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv2i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i32_nxv2i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv2i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i32_nxv2i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv2i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i32.nxv8i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv8i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i32_nxv8i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv8i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i32_nxv8i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv8i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i32.nxv32i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv32i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i32_nxv32i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv32i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i32_nxv32i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv32i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i32.nxv16i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv16i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i32_nxv16i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv16i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i32_nxv16i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv16i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i32.nxv2i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv2i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i32_nxv2i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv2i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i32_nxv2i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv2i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i32.nxv2i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv2i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i32_nxv2i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i32.nxv2i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i32_nxv2i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv2i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i32.nxv16i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv16i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i32_nxv16i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv16i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i32_nxv16i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv16i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i32.nxv32i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv32i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i32_nxv32i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv32i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i32_nxv32i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv32i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i32.nxv4i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv4i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i32_nxv4i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv4i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i32_nxv4i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv4i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i32.nxv16i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv16i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i32_nxv16i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv16i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i32_nxv16i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv16i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i64>, i64)
 declare void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
 
@@ -14118,84 +2486,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg7.nxv1i32.nxv8i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv8i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i32_nxv8i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv8i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i32_nxv8i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv8i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i32.nxv4i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv4i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i32_nxv4i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv4i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i32_nxv4i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv4i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i16>, i64)
 declare void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
 
@@ -14235,240 +2525,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg7.nxv1i32.nxv2i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv2i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i32_nxv2i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv2i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i32_nxv2i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv2i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i32.nxv8i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv8i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i32_nxv8i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv8i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i32_nxv8i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv8i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i32.nxv4i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv4i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i32_nxv4i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv4i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i32_nxv4i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv4i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i32.nxv64i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv64i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 64 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i32_nxv64i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv64i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i32_nxv64i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv64i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i32.nxv4i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv4i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i32_nxv4i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv4i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i32_nxv4i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv4i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i32.nxv8i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv8i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i32_nxv8i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv8i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i32_nxv8i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv8i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i8>, i64)
 declare void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)
 
@@ -14508,404 +2564,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg7.nxv1i32.nxv2i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv2i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i32_nxv2i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv2i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i32_nxv2i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv2i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i32.nxv8i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv8i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i32_nxv8i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv8i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i32_nxv8i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv8i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i32.nxv32i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv32i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i32_nxv32i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv32i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i32_nxv32i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv32i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i32.nxv16i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv16i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i32_nxv16i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv16i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i32_nxv16i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv16i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i32.nxv2i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv2i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i32_nxv2i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv2i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i32_nxv2i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv2i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i32.nxv2i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv2i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i32_nxv2i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i32.nxv2i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i32_nxv2i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv2i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i32.nxv16i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv16i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i32_nxv16i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv16i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i32_nxv16i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv16i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i32.nxv32i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv32i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i32_nxv32i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv32i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i32_nxv32i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv32i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i32.nxv4i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv4i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i32_nxv4i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv4i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i32_nxv4i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv4i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i32.nxv16i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv16i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i32_nxv16i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv16i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i32_nxv16i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv16i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i64>, i64)
 declare void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
 
@@ -14988,88 +2646,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg8.nxv1i32.nxv8i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv8i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i32_nxv8i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv8i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i32_nxv8i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv8i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i32.nxv4i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv4i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i32_nxv4i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv4i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i32_nxv4i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv4i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i16>, i64)
 declare void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
 
@@ -15111,252 +2687,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg8.nxv1i32.nxv2i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv2i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i32_nxv2i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv2i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i32_nxv2i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv2i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i32.nxv8i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv8i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i32_nxv8i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv8i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i32_nxv8i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv8i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i32.nxv4i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv4i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i32_nxv4i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv4i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i32_nxv4i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv4i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i32.nxv64i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv64i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 64 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i32_nxv64i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv64i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i32_nxv64i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv64i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i32.nxv4i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv4i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i32_nxv4i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv4i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i32_nxv4i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv4i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i32.nxv8i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv8i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i32_nxv8i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv8i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i32_nxv8i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv8i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i8>, i64)
 declare void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)
 
@@ -15398,434 +2728,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg8.nxv1i32.nxv2i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv2i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i32_nxv2i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv2i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i32_nxv2i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv2i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i32.nxv8i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv8i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i32_nxv8i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv8i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i32_nxv8i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv8i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i32.nxv32i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv32i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i32_nxv32i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv32i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i32_nxv32i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv32i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i32.nxv16i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv16i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i32_nxv16i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv16i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i32_nxv16i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv16i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i32.nxv2i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv2i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i32_nxv2i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv2i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i32_nxv2i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv2i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i32.nxv2i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv2i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i32_nxv2i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i32.nxv2i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i32_nxv2i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv2i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8i16.nxv16i16(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv16i16(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 16 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8i16_nxv16i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8i16.nxv16i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8i16_nxv16i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv16i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8i16.nxv32i16(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv32i16(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 32 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8i16_nxv32i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8i16.nxv32i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8i16_nxv32i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv32i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8i16.nxv4i32(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv4i32(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 4 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8i16_nxv4i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v26, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v26
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8i16.nxv4i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8i16_nxv4i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v26, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v26, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv4i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8i16.nxv16i8(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv16i8(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 16 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8i16_nxv16i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v26, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v26
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8i16.nxv16i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8i16_nxv16i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v26, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v26, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv16i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8i16.nxv1i64(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv1i64(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 1 x i64>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8i16_nxv1i64(<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8i16.nxv1i64(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8i16_nxv1i64(<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv1i64(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8i16.nxv1i32(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv1i32(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 1 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8i16_nxv1i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8i16.nxv1i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8i16_nxv1i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv1i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i16>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i16>, <vscale x 8 x i1>, i64)
 
@@ -15857,99 +2759,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv8i16.nxv4i8(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv4i8(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 4 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8i16_nxv4i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8i16.nxv4i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8i16_nxv4i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv4i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8i16.nxv1i16(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv1i16(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 1 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8i16_nxv1i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8i16.nxv1i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8i16_nxv1i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv1i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8i16.nxv2i32(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv2i32(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 2 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8i16_nxv2i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8i16.nxv2i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8i16_nxv2i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv2i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i8(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i8>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i8(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i8>, <vscale x 8 x i1>, i64)
 
@@ -15981,95 +2790,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv8i16.nxv4i64(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv4i64(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 4 x i64>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8i16_nxv4i64(<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8i16.nxv4i64(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8i16_nxv4i64(<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv4i64(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8i16.nxv64i8(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv64i8(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 64 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8i16_nxv64i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8i16.nxv64i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8i16_nxv64i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv64i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8i16.nxv4i16(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv4i16(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 4 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8i16_nxv4i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8i16.nxv4i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8i16_nxv4i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv4i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i64(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i64>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i64(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i64>, <vscale x 8 x i1>, i64)
 
@@ -16099,68 +2819,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv8i16.nxv1i8(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv1i8(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 1 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8i16_nxv1i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8i16.nxv1i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8i16_nxv1i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv1i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8i16.nxv2i8(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv2i8(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 2 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8i16_nxv2i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8i16.nxv2i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8i16_nxv2i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv2i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i32(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i32>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i32(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i32>, <vscale x 8 x i1>, i64)
 
@@ -16190,312 +2848,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv8i16.nxv32i8(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv32i8(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 32 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8i16_nxv32i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8i16.nxv32i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8i16_nxv32i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv32i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8i16.nxv16i32(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv16i32(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 16 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8i16_nxv16i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8i16.nxv16i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8i16_nxv16i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv16i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8i16.nxv2i16(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv2i16(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 2 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8i16_nxv2i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8i16.nxv2i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8i16_nxv2i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv2i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8i16.nxv2i64(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv2i64(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 2 x i64>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8i16_nxv2i64(<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v26, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v26
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8i16.nxv2i64(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8i16_nxv2i64(<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v26, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v26, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv2i64(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv8i16.nxv16i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv16i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 16 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg3_nxv8i16_nxv16i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv16i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv8i16_nxv16i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v2, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv16i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv8i16.nxv32i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv32i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 32 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg3_nxv8i16_nxv32i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv32i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv8i16_nxv32i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv32i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv8i16.nxv4i32(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv4i32(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 4 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg3_nxv8i16_nxv4i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv4i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv8i16_nxv4i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv4i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv8i16.nxv16i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv16i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 16 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg3_nxv8i16_nxv16i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv16i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv8i16_nxv16i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv16i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv8i16.nxv1i64(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv1i64(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 1 x i64>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg3_nxv8i16_nxv1i64(<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv1i64(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv8i16_nxv1i64(<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv1i64(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv8i16.nxv1i32(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv1i32(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 1 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg3_nxv8i16_nxv1i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv1i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv8i16_nxv1i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv1i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i16>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i16>, <vscale x 8 x i1>, i64)
 
@@ -16527,99 +2879,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv8i16.nxv4i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv4i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 4 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg3_nxv8i16_nxv4i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv4i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv8i16_nxv4i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv4i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv8i16.nxv1i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv1i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 1 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg3_nxv8i16_nxv1i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv1i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv8i16_nxv1i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv1i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv8i16.nxv2i32(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv2i32(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 2 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg3_nxv8i16_nxv2i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv2i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv8i16_nxv2i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv2i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i8>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i8>, <vscale x 8 x i1>, i64)
 
@@ -16651,99 +2910,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv8i16.nxv4i64(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv4i64(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 4 x i64>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg3_nxv8i16_nxv4i64(<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv4i64(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv8i16_nxv4i64(<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v2, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv4i64(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv8i16.nxv64i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv64i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 64 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg3_nxv8i16_nxv64i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv64i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv8i16_nxv64i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv64i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv8i16.nxv4i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv4i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 4 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg3_nxv8i16_nxv4i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv4i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv8i16_nxv4i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv4i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i64(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i64>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i64(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i64>, <vscale x 8 x i1>, i64)
 
@@ -16775,68 +2941,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv8i16.nxv1i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv1i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 1 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg3_nxv8i16_nxv1i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv1i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv8i16_nxv1i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv1i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv8i16.nxv2i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv2i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 2 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg3_nxv8i16_nxv2i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv2i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv8i16_nxv2i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv2i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i32(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i32>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i32(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i32>, <vscale x 8 x i1>, i64)
 
@@ -16868,328 +2972,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv8i16.nxv32i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv32i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 32 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg3_nxv8i16_nxv32i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv32i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv8i16_nxv32i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v2, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv32i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv8i16.nxv16i32(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv16i32(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 16 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg3_nxv8i16_nxv16i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv16i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv8i16_nxv16i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv16i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv8i16.nxv2i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv2i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 2 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg3_nxv8i16_nxv2i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv2i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv8i16_nxv2i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv2i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv8i16.nxv2i64(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv2i64(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 2 x i64>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg3_nxv8i16_nxv2i64(<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv2i64(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv8i16_nxv2i64(<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv2i64(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv8i16.nxv16i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv16i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 16 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg4_nxv8i16_nxv16i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv8i16.nxv16i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv8i16_nxv16i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v2, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv16i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv8i16.nxv32i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv32i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 32 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg4_nxv8i16_nxv32i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv8i16.nxv32i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv8i16_nxv32i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv32i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv8i16.nxv4i32(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv4i32(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 4 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg4_nxv8i16_nxv4i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv8i16.nxv4i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv8i16_nxv4i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv4i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv8i16.nxv16i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv16i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 16 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg4_nxv8i16_nxv16i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv8i16.nxv16i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv8i16_nxv16i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv16i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv8i16.nxv1i64(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv1i64(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 1 x i64>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg4_nxv8i16_nxv1i64(<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv8i16.nxv1i64(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv8i16_nxv1i64(<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv1i64(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv8i16.nxv1i32(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv1i32(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 1 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg4_nxv8i16_nxv1i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv8i16.nxv1i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv8i16_nxv1i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv1i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i16>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i16>, <vscale x 8 x i1>, i64)
 
@@ -17223,105 +3005,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv8i16.nxv4i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv4i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 4 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg4_nxv8i16_nxv4i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv8i16.nxv4i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv8i16_nxv4i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv4i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv8i16.nxv1i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv1i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 1 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg4_nxv8i16_nxv1i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv8i16.nxv1i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv8i16_nxv1i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv1i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv8i16.nxv2i32(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv2i32(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 2 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg4_nxv8i16_nxv2i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv8i16.nxv2i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv8i16_nxv2i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv2i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i8>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i8>, <vscale x 8 x i1>, i64)
 
@@ -17355,105 +3038,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv8i16.nxv4i64(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv4i64(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 4 x i64>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg4_nxv8i16_nxv4i64(<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv8i16.nxv4i64(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv8i16_nxv4i64(<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v2, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv4i64(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv8i16.nxv64i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv64i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 64 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg4_nxv8i16_nxv64i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv8i16.nxv64i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv8i16_nxv64i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv64i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv8i16.nxv4i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv4i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 4 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg4_nxv8i16_nxv4i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv8i16.nxv4i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv8i16_nxv4i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv4i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i64(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i64>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i64(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i64>, <vscale x 8 x i1>, i64)
 
@@ -17487,72 +3071,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv8i16.nxv1i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv1i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 1 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg4_nxv8i16_nxv1i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv8i16.nxv1i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv8i16_nxv1i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv1i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv8i16.nxv2i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv2i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 2 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg4_nxv8i16_nxv2i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv8i16.nxv2i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv8i16_nxv2i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv2i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i32(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i32>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i32(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i32>, <vscale x 8 x i1>, i64)
 
@@ -17586,196 +3104,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv8i16.nxv32i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv32i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 32 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg4_nxv8i16_nxv32i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv8i16.nxv32i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv8i16_nxv32i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v2, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv32i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv8i16.nxv16i32(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv16i32(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 16 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg4_nxv8i16_nxv16i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv8i16.nxv16i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv8i16_nxv16i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv16i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv8i16.nxv2i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv2i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 2 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg4_nxv8i16_nxv2i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv8i16.nxv2i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv8i16_nxv2i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv2i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv8i16.nxv2i64(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv2i64(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 2 x i64>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg4_nxv8i16_nxv2i64(<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv8i16.nxv2i64(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv8i16_nxv2i64(<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv2i64(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4i8.nxv16i16(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv16i16(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i8_nxv16i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i8.nxv16i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i8_nxv16i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv16i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4i8.nxv32i16(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv32i16(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 32 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i8_nxv32i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i8.nxv32i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i8_nxv32i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv32i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i32>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i32>, <vscale x 4 x i1>, i64)
 
@@ -17805,126 +3133,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv4i8.nxv16i8(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv16i8(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i8_nxv16i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i8.nxv16i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i8_nxv16i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv16i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4i8.nxv1i64(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv1i64(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i8_nxv1i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i8.nxv1i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i8_nxv1i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv1i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4i8.nxv1i32(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv1i32(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i8_nxv1i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i8.nxv1i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i8_nxv1i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv1i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4i8.nxv8i16(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv8i16(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i8_nxv8i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i8.nxv8i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i8_nxv8i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv8i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i8>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i8>, <vscale x 4 x i1>, i64)
 
@@ -17956,99 +3164,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv4i8.nxv1i16(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv1i16(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i8_nxv1i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i8.nxv1i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i8_nxv1i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv1i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4i8.nxv2i32(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv2i32(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i8_nxv2i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i8.nxv2i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i8_nxv2i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv2i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4i8.nxv8i8(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv8i8(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i8_nxv8i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i8.nxv8i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i8_nxv8i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv8i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i64(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i64>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i64(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i64>, <vscale x 4 x i1>, i64)
 
@@ -18078,35 +3193,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv4i8.nxv64i8(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv64i8(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 64 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i8_nxv64i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i8.nxv64i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i8_nxv64i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv64i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i16>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i16>, <vscale x 4 x i1>, i64)
 
@@ -18138,306 +3224,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv4i8.nxv8i64(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv8i64(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i8_nxv8i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i8.nxv8i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i8_nxv8i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv8i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4i8.nxv1i8(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv1i8(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i8_nxv1i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i8.nxv1i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i8_nxv1i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv1i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4i8.nxv2i8(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv2i8(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i8_nxv2i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i8.nxv2i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i8_nxv2i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv2i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4i8.nxv8i32(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv8i32(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i8_nxv8i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i8.nxv8i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i8_nxv8i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv8i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4i8.nxv32i8(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv32i8(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 32 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i8_nxv32i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i8.nxv32i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i8_nxv32i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv32i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4i8.nxv16i32(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv16i32(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i8_nxv16i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i8.nxv16i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i8_nxv16i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv16i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4i8.nxv2i16(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv2i16(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i8_nxv2i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i8.nxv2i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i8_nxv2i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv2i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4i8.nxv2i64(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv2i64(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i8_nxv2i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i8.nxv2i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i8_nxv2i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv2i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4i8.nxv16i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv16i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4i8_nxv16i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv16i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4i8_nxv16i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv16i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4i8.nxv32i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv32i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 32 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4i8_nxv32i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv32i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4i8_nxv32i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv32i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i32>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i32>, <vscale x 4 x i1>, i64)
 
@@ -18469,130 +3255,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv4i8.nxv16i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv16i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4i8_nxv16i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv16i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4i8_nxv16i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv16i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4i8.nxv1i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv1i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4i8_nxv1i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv1i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4i8_nxv1i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv1i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4i8.nxv1i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv1i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4i8_nxv1i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv1i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4i8_nxv1i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv1i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4i8.nxv8i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv8i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4i8_nxv8i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv8i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4i8_nxv8i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv8i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i8>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i8>, <vscale x 4 x i1>, i64)
 
@@ -18624,99 +3286,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv4i8.nxv1i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv1i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4i8_nxv1i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv1i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4i8_nxv1i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv1i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4i8.nxv2i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv2i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4i8_nxv2i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv2i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4i8_nxv2i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv2i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4i8.nxv8i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv8i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4i8_nxv8i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv8i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4i8_nxv8i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv8i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i64>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i64>, <vscale x 4 x i1>, i64)
 
@@ -18748,37 +3317,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv4i8.nxv64i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv64i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 64 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4i8_nxv64i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv64i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4i8_nxv64i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv64i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i16>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i16>, <vscale x 4 x i1>, i64)
 
@@ -18810,320 +3348,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv4i8.nxv8i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv8i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4i8_nxv8i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv8i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4i8_nxv8i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv8i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4i8.nxv1i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv1i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4i8_nxv1i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv1i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4i8_nxv1i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv1i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4i8.nxv2i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv2i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4i8_nxv2i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv2i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4i8_nxv2i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv2i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4i8.nxv8i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv8i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4i8_nxv8i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv8i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4i8_nxv8i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv8i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4i8.nxv32i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv32i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 32 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4i8_nxv32i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv32i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4i8_nxv32i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv32i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4i8.nxv16i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv16i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4i8_nxv16i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv16i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4i8_nxv16i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv16i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4i8.nxv2i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv2i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4i8_nxv2i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv2i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4i8_nxv2i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv2i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4i8.nxv2i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv2i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4i8_nxv2i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv2i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4i8_nxv2i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv2i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4i8.nxv16i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv16i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4i8_nxv16i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4i8.nxv16i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4i8_nxv16i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv16i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4i8.nxv32i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv32i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 32 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4i8_nxv32i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4i8.nxv32i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4i8_nxv32i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv32i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i32>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i32>, <vscale x 4 x i1>, i64)
 
@@ -19157,138 +3381,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv4i8.nxv16i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv16i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4i8_nxv16i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4i8.nxv16i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4i8_nxv16i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv16i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4i8.nxv1i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv1i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4i8_nxv1i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4i8.nxv1i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4i8_nxv1i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv1i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4i8.nxv1i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv1i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4i8_nxv1i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4i8.nxv1i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4i8_nxv1i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv1i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4i8.nxv8i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv8i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4i8_nxv8i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4i8.nxv8i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4i8_nxv8i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv8i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i8>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i8>, <vscale x 4 x i1>, i64)
 
@@ -19322,105 +3414,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv4i8.nxv1i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv1i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4i8_nxv1i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4i8.nxv1i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4i8_nxv1i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv1i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4i8.nxv2i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv2i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4i8_nxv2i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4i8.nxv2i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4i8_nxv2i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv2i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4i8.nxv8i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv8i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4i8_nxv8i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4i8.nxv8i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4i8_nxv8i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv8i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i64>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i64>, <vscale x 4 x i1>, i64)
 
@@ -19454,39 +3447,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv4i8.nxv64i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv64i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 64 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4i8_nxv64i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4i8.nxv64i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4i8_nxv64i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv64i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i16>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i16>, <vscale x 4 x i1>, i64)
 
@@ -19520,340 +3480,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv4i8.nxv8i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv8i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4i8_nxv8i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4i8.nxv8i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4i8_nxv8i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv8i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4i8.nxv1i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv1i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4i8_nxv1i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4i8.nxv1i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4i8_nxv1i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv1i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4i8.nxv2i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv2i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4i8_nxv2i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4i8.nxv2i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4i8_nxv2i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv2i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4i8.nxv8i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv8i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4i8_nxv8i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4i8.nxv8i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4i8_nxv8i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv8i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4i8.nxv32i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv32i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 32 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4i8_nxv32i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4i8.nxv32i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4i8_nxv32i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv32i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4i8.nxv16i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv16i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4i8_nxv16i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4i8.nxv16i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4i8_nxv16i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv16i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4i8.nxv2i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv2i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4i8_nxv2i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4i8.nxv2i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4i8_nxv2i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv2i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4i8.nxv2i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv2i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4i8_nxv2i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4i8.nxv2i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4i8_nxv2i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv2i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv4i8.nxv16i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv16i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg5_nxv4i8_nxv16i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv16i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv4i8_nxv16i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv16i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv4i8.nxv32i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv32i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 32 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg5_nxv4i8_nxv32i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv32i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv4i8_nxv32i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv32i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i32>, i64)
 declare void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i32>, <vscale x 4 x i1>, i64)
 
@@ -19889,146 +3515,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg5.nxv4i8.nxv16i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv16i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg5_nxv4i8_nxv16i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv16i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv4i8_nxv16i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv16i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv4i8.nxv1i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv1i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg5_nxv4i8_nxv1i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv1i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv4i8_nxv1i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv1i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv4i8.nxv1i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv1i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg5_nxv4i8_nxv1i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv1i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv4i8_nxv1i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv1i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv4i8.nxv8i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv8i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg5_nxv4i8_nxv8i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv8i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv4i8_nxv8i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv8i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i8>, i64)
 declare void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i8>, <vscale x 4 x i1>, i64)
 
@@ -20064,111 +3550,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg5.nxv4i8.nxv1i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv1i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg5_nxv4i8_nxv1i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv1i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv4i8_nxv1i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv1i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv4i8.nxv2i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv2i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg5_nxv4i8_nxv2i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv2i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv4i8_nxv2i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv2i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv4i8.nxv8i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv8i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg5_nxv4i8_nxv8i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv8i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv4i8_nxv8i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv8i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i64>, i64)
 declare void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i64>, <vscale x 4 x i1>, i64)
 
@@ -20204,41 +3585,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg5.nxv4i8.nxv64i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv64i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 64 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg5_nxv4i8_nxv64i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv64i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv4i8_nxv64i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv64i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i16>, i64)
 declare void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i16>, <vscale x 4 x i1>, i64)
 
@@ -20274,360 +3620,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg5.nxv4i8.nxv8i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv8i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg5_nxv4i8_nxv8i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv8i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv4i8_nxv8i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv8i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv4i8.nxv1i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv1i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg5_nxv4i8_nxv1i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv1i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv4i8_nxv1i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv1i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv4i8.nxv2i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv2i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg5_nxv4i8_nxv2i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv2i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv4i8_nxv2i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv2i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv4i8.nxv8i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv8i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg5_nxv4i8_nxv8i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv8i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv4i8_nxv8i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv8i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv4i8.nxv32i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv32i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 32 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg5_nxv4i8_nxv32i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv32i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv4i8_nxv32i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv32i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv4i8.nxv16i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv16i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg5_nxv4i8_nxv16i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv16i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv4i8_nxv16i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv16i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv4i8.nxv2i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv2i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg5_nxv4i8_nxv2i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv2i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv4i8_nxv2i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv2i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv4i8.nxv2i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv2i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg5_nxv4i8_nxv2i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv2i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv4i8_nxv2i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv2i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv4i8.nxv16i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv16i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg6_nxv4i8_nxv16i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv4i8.nxv16i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv4i8_nxv16i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv16i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv4i8.nxv32i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv32i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 32 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg6_nxv4i8_nxv32i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv4i8.nxv32i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv4i8_nxv32i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv32i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i32>, i64)
 declare void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i32>, <vscale x 4 x i1>, i64)
 
@@ -20665,154 +3657,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg6.nxv4i8.nxv16i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv16i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg6_nxv4i8_nxv16i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv4i8.nxv16i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv4i8_nxv16i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv16i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv4i8.nxv1i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv1i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg6_nxv4i8_nxv1i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv4i8.nxv1i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv4i8_nxv1i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv1i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv4i8.nxv1i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv1i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg6_nxv4i8_nxv1i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv4i8.nxv1i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv4i8_nxv1i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv1i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv4i8.nxv8i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv8i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg6_nxv4i8_nxv8i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv4i8.nxv8i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv4i8_nxv8i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv8i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i8>, i64)
 declare void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i8>, <vscale x 4 x i1>, i64)
 
@@ -20850,117 +3694,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg6.nxv4i8.nxv1i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv1i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg6_nxv4i8_nxv1i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv4i8.nxv1i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv4i8_nxv1i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv1i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv4i8.nxv2i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv2i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg6_nxv4i8_nxv2i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv4i8.nxv2i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv4i8_nxv2i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv2i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv4i8.nxv8i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv8i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg6_nxv4i8_nxv8i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv4i8.nxv8i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv4i8_nxv8i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv8i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i64>, i64)
 declare void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i64>, <vscale x 4 x i1>, i64)
 
@@ -20998,43 +3731,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg6.nxv4i8.nxv64i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv64i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 64 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg6_nxv4i8_nxv64i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv4i8.nxv64i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv4i8_nxv64i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv64i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i16>, i64)
 declare void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i16>, <vscale x 4 x i1>, i64)
 
@@ -21072,380 +3768,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg6.nxv4i8.nxv8i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv8i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg6_nxv4i8_nxv8i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv4i8.nxv8i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv4i8_nxv8i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv8i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv4i8.nxv1i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv1i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg6_nxv4i8_nxv1i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv4i8.nxv1i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv4i8_nxv1i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv1i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv4i8.nxv2i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv2i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg6_nxv4i8_nxv2i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv4i8.nxv2i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv4i8_nxv2i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv2i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv4i8.nxv8i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv8i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg6_nxv4i8_nxv8i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv4i8.nxv8i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv4i8_nxv8i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv8i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv4i8.nxv32i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv32i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 32 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg6_nxv4i8_nxv32i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv4i8.nxv32i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv4i8_nxv32i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv32i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv4i8.nxv16i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv16i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg6_nxv4i8_nxv16i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv4i8.nxv16i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv4i8_nxv16i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv16i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv4i8.nxv2i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv2i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg6_nxv4i8_nxv2i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv4i8.nxv2i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv4i8_nxv2i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv2i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv4i8.nxv2i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv2i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg6_nxv4i8_nxv2i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv4i8.nxv2i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv4i8_nxv2i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv2i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv4i8.nxv16i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv16i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg7_nxv4i8_nxv16i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv4i8.nxv16i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv4i8_nxv16i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv16i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv4i8.nxv32i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv32i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 32 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg7_nxv4i8_nxv32i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv4i8.nxv32i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv4i8_nxv32i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv32i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i32>, i64)
 declare void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i32>, <vscale x 4 x i1>, i64)
 
@@ -21485,162 +3807,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg7.nxv4i8.nxv16i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv16i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg7_nxv4i8_nxv16i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv4i8.nxv16i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv4i8_nxv16i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv16i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv4i8.nxv1i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv1i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg7_nxv4i8_nxv1i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv4i8.nxv1i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv4i8_nxv1i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv1i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv4i8.nxv1i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv1i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg7_nxv4i8_nxv1i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv4i8.nxv1i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv4i8_nxv1i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv1i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv4i8.nxv8i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv8i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg7_nxv4i8_nxv8i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv4i8.nxv8i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv4i8_nxv8i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv8i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i8>, i64)
 declare void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i8>, <vscale x 4 x i1>, i64)
 
@@ -21680,123 +3846,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg7.nxv4i8.nxv1i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv1i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg7_nxv4i8_nxv1i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv4i8.nxv1i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv4i8_nxv1i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv1i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv4i8.nxv2i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv2i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg7_nxv4i8_nxv2i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv4i8.nxv2i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv4i8_nxv2i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv2i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv4i8.nxv8i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv8i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg7_nxv4i8_nxv8i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv4i8.nxv8i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv4i8_nxv8i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv8i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i64>, i64)
 declare void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i64>, <vscale x 4 x i1>, i64)
 
@@ -21836,45 +3885,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg7.nxv4i8.nxv64i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv64i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 64 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg7_nxv4i8_nxv64i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv4i8.nxv64i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv4i8_nxv64i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv64i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i16>, i64)
 declare void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i16>, <vscale x 4 x i1>, i64)
 
@@ -21914,400 +3924,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg7.nxv4i8.nxv8i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv8i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg7_nxv4i8_nxv8i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv4i8.nxv8i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv4i8_nxv8i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv8i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv4i8.nxv1i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv1i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg7_nxv4i8_nxv1i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv4i8.nxv1i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv4i8_nxv1i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv1i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv4i8.nxv2i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv2i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg7_nxv4i8_nxv2i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv4i8.nxv2i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv4i8_nxv2i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv2i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv4i8.nxv8i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv8i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg7_nxv4i8_nxv8i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv4i8.nxv8i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv4i8_nxv8i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv8i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv4i8.nxv32i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv32i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 32 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg7_nxv4i8_nxv32i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv4i8.nxv32i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv4i8_nxv32i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv32i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv4i8.nxv16i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv16i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg7_nxv4i8_nxv16i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv4i8.nxv16i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv4i8_nxv16i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv16i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv4i8.nxv2i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv2i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg7_nxv4i8_nxv2i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv4i8.nxv2i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv4i8_nxv2i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv2i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv4i8.nxv2i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv2i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg7_nxv4i8_nxv2i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv4i8.nxv2i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv4i8_nxv2i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv2i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv4i8.nxv16i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv16i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg8_nxv4i8_nxv16i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv4i8.nxv16i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv4i8_nxv16i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv16i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv4i8.nxv32i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv32i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 32 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg8_nxv4i8_nxv32i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv4i8.nxv32i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv4i8_nxv32i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv32i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i32>, i64)
 declare void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i32>, <vscale x 4 x i1>, i64)
 
@@ -22349,170 +3965,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg8.nxv4i8.nxv16i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv16i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg8_nxv4i8_nxv16i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv4i8.nxv16i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv4i8_nxv16i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv16i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv4i8.nxv1i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv1i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg8_nxv4i8_nxv1i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv4i8.nxv1i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv4i8_nxv1i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv1i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv4i8.nxv1i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv1i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg8_nxv4i8_nxv1i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv4i8.nxv1i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv4i8_nxv1i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv1i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv4i8.nxv8i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv8i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg8_nxv4i8_nxv8i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv4i8.nxv8i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv4i8_nxv8i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv8i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i8>, i64)
 declare void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i8>, <vscale x 4 x i1>, i64)
 
@@ -22554,129 +4006,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg8.nxv4i8.nxv1i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv1i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg8_nxv4i8_nxv1i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv4i8.nxv1i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv4i8_nxv1i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv1i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv4i8.nxv2i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv2i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg8_nxv4i8_nxv2i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv4i8.nxv2i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv4i8_nxv2i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv2i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv4i8.nxv8i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv8i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg8_nxv4i8_nxv8i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv4i8.nxv8i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv4i8_nxv8i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv8i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i64>, i64)
 declare void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i64>, <vscale x 4 x i1>, i64)
 
@@ -22718,47 +4047,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg8.nxv4i8.nxv64i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv64i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 64 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg8_nxv4i8_nxv64i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv4i8.nxv64i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv4i8_nxv64i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv64i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i16>, i64)
 declare void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i16>, <vscale x 4 x i1>, i64)
 
@@ -22800,450 +4088,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg8.nxv4i8.nxv8i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv8i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg8_nxv4i8_nxv8i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv4i8.nxv8i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv4i8_nxv8i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv8i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv4i8.nxv1i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv1i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg8_nxv4i8_nxv1i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv4i8.nxv1i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv4i8_nxv1i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv1i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv4i8.nxv2i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv2i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg8_nxv4i8_nxv2i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv4i8.nxv2i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv4i8_nxv2i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv2i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv4i8.nxv8i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv8i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg8_nxv4i8_nxv8i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv4i8.nxv8i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv4i8_nxv8i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv8i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv4i8.nxv32i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv32i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 32 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg8_nxv4i8_nxv32i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv4i8.nxv32i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv4i8_nxv32i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv32i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv4i8.nxv16i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv16i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg8_nxv4i8_nxv16i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv4i8.nxv16i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv4i8_nxv16i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv16i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv4i8.nxv2i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv2i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg8_nxv4i8_nxv2i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv4i8.nxv2i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv4i8_nxv2i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv2i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv4i8.nxv2i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv2i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg8_nxv4i8_nxv2i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv4i8.nxv2i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv4i8_nxv2i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv2i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i16.nxv16i16(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv16i16(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i16_nxv16i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i16.nxv16i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i16_nxv16i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv16i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i16.nxv32i16(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv32i16(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i16_nxv32i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i16.nxv32i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i16_nxv32i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv32i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i16.nxv4i32(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv4i32(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i16_nxv4i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i16.nxv4i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i16_nxv4i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv4i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i16.nxv16i8(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv16i8(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i16_nxv16i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i16.nxv16i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i16_nxv16i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv16i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i64(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i64>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i64(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
 
@@ -23306,66 +4150,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv1i16.nxv8i16(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv8i16(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i16_nxv8i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i16.nxv8i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i16_nxv8i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv8i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i16.nxv4i8(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv4i8(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i16_nxv4i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i16.nxv4i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i16_nxv4i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv4i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i16>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
 
@@ -23397,186 +4181,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv1i16.nxv2i32(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv2i32(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i16_nxv2i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i16.nxv2i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i16_nxv2i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv2i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i16.nxv8i8(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv8i8(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i16_nxv8i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i16.nxv8i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i16_nxv8i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv8i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i16.nxv4i64(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv4i64(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i16_nxv4i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i16.nxv4i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i16_nxv4i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv4i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i16.nxv64i8(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv64i8(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 64 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i16_nxv64i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i16.nxv64i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i16_nxv64i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv64i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i16.nxv4i16(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv4i16(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i16_nxv4i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i16.nxv4i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i16_nxv4i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv4i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i16.nxv8i64(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv8i64(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i16_nxv8i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i16.nxv8i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i16_nxv8i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv8i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i8>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)
 
@@ -23608,308 +4212,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv1i16.nxv2i8(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv2i8(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i16_nxv2i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i16.nxv2i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i16_nxv2i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv2i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i16.nxv8i32(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv8i32(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i16_nxv8i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i16.nxv8i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i16_nxv8i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv8i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i16.nxv32i8(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv32i8(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i16_nxv32i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i16.nxv32i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i16_nxv32i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv32i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i16.nxv16i32(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv16i32(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i16_nxv16i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i16.nxv16i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i16_nxv16i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv16i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i16.nxv2i16(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv2i16(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i16_nxv2i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i16.nxv2i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i16_nxv2i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv2i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i16.nxv2i64(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv2i64(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i16_nxv2i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i16.nxv2i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i16_nxv2i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv2i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i16.nxv16i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv16i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i16_nxv16i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i16.nxv16i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i16_nxv16i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv16i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i16.nxv32i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv32i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i16_nxv32i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i16.nxv32i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i16_nxv32i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv32i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i16.nxv4i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv4i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i16_nxv4i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i16.nxv4i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i16_nxv4i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv4i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i16.nxv16i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv16i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i16_nxv16i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i16.nxv16i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i16_nxv16i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv16i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i64>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
 
@@ -23972,68 +4274,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv1i16.nxv8i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv8i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i16_nxv8i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i16.nxv8i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i16_nxv8i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv8i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i16.nxv4i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv4i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i16_nxv4i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i16.nxv4i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i16_nxv4i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv4i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i16>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
 
@@ -24065,192 +4305,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv1i16.nxv2i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv2i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i16_nxv2i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i16.nxv2i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i16_nxv2i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv2i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i16.nxv8i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv8i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i16_nxv8i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i16.nxv8i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i16_nxv8i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv8i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i16.nxv4i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv4i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i16_nxv4i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i16.nxv4i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i16_nxv4i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv4i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i16.nxv64i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv64i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 64 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i16_nxv64i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i16.nxv64i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i16_nxv64i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv64i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i16.nxv4i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv4i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i16_nxv4i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i16.nxv4i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i16_nxv4i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv4i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i16.nxv8i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv8i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i16_nxv8i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i16.nxv8i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i16_nxv8i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv8i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i8>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)
 
@@ -24282,324 +4336,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv1i16.nxv2i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv2i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i16_nxv2i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i16.nxv2i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i16_nxv2i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv2i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i16.nxv8i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv8i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i16_nxv8i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i16.nxv8i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i16_nxv8i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv8i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i16.nxv32i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv32i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i16_nxv32i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i16.nxv32i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i16_nxv32i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv32i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i16.nxv16i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv16i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i16_nxv16i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i16.nxv16i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i16_nxv16i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv16i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i16.nxv2i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv2i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i16_nxv2i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i16.nxv2i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i16_nxv2i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv2i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i16.nxv2i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv2i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i16_nxv2i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i16.nxv2i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i16_nxv2i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv2i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i16.nxv16i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv16i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i16_nxv16i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i16.nxv16i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i16_nxv16i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv16i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i16.nxv32i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv32i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i16_nxv32i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i16.nxv32i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i16_nxv32i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv32i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i16.nxv4i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv4i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i16_nxv4i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i16.nxv4i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i16_nxv4i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv4i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i16.nxv16i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv16i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i16_nxv16i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i16.nxv16i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i16_nxv16i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv16i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i64>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
 
@@ -24666,72 +4402,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv1i16.nxv8i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv8i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i16_nxv8i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i16.nxv8i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i16_nxv8i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv8i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i16.nxv4i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv4i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i16_nxv4i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i16.nxv4i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i16_nxv4i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv4i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i16>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
 
@@ -24765,204 +4435,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv1i16.nxv2i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv2i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i16_nxv2i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i16.nxv2i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i16_nxv2i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv2i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i16.nxv8i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv8i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i16_nxv8i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i16.nxv8i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i16_nxv8i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv8i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i16.nxv4i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv4i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i16_nxv4i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i16.nxv4i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i16_nxv4i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv4i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i16.nxv64i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv64i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 64 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i16_nxv64i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i16.nxv64i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i16_nxv64i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv64i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i16.nxv4i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv4i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i16_nxv4i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i16.nxv4i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i16_nxv4i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv4i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i16.nxv8i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv8i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i16_nxv8i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i16.nxv8i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i16_nxv8i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv8i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i8>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)
 
@@ -24996,344 +4468,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv1i16.nxv2i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv2i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i16_nxv2i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i16.nxv2i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i16_nxv2i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv2i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i16.nxv8i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv8i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i16_nxv8i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i16.nxv8i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i16_nxv8i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv8i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i16.nxv32i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv32i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i16_nxv32i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i16.nxv32i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i16_nxv32i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv32i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i16.nxv16i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv16i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i16_nxv16i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i16.nxv16i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i16_nxv16i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv16i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i16.nxv2i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv2i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i16_nxv2i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i16.nxv2i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i16_nxv2i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv2i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i16.nxv2i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv2i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i16_nxv2i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i16.nxv2i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i16_nxv2i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv2i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i16.nxv16i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv16i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i16_nxv16i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i16.nxv16i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i16_nxv16i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv16i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i16.nxv32i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv32i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i16_nxv32i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i16.nxv32i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i16_nxv32i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv32i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i16.nxv4i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv4i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i16_nxv4i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i16.nxv4i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i16_nxv4i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv4i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i16.nxv16i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv16i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i16_nxv16i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i16.nxv16i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i16_nxv16i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv16i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i64>, i64)
 declare void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
 
@@ -25404,76 +4538,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg5.nxv1i16.nxv8i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv8i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i16_nxv8i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i16.nxv8i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i16_nxv8i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv8i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i16.nxv4i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv4i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i16_nxv4i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i16.nxv4i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i16_nxv4i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv4i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i16>, i64)
 declare void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
 
@@ -25509,216 +4573,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg5.nxv1i16.nxv2i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv2i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i16_nxv2i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i16.nxv2i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i16_nxv2i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv2i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i16.nxv8i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv8i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i16_nxv8i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i16.nxv8i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i16_nxv8i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv8i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i16.nxv4i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv4i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i16_nxv4i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i16.nxv4i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i16_nxv4i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv4i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i16.nxv64i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv64i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 64 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i16_nxv64i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i16.nxv64i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i16_nxv64i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv64i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i16.nxv4i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv4i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i16_nxv4i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i16.nxv4i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i16_nxv4i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv4i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i16.nxv8i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv8i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i16_nxv8i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i16.nxv8i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i16_nxv8i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv8i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i8>, i64)
 declare void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)
 
@@ -25754,364 +4608,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg5.nxv1i16.nxv2i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv2i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i16_nxv2i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i16.nxv2i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i16_nxv2i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv2i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i16.nxv8i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv8i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i16_nxv8i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i16.nxv8i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i16_nxv8i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv8i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i16.nxv32i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv32i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i16_nxv32i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i16.nxv32i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i16_nxv32i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv32i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i16.nxv16i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv16i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i16_nxv16i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i16.nxv16i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i16_nxv16i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv16i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i16.nxv2i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv2i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i16_nxv2i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i16.nxv2i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i16_nxv2i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv2i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i16.nxv2i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv2i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i16_nxv2i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i16.nxv2i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i16_nxv2i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv2i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i16.nxv16i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv16i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i16_nxv16i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv16i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i16_nxv16i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv16i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i16.nxv32i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv32i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i16_nxv32i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv32i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i16_nxv32i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv32i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i16.nxv4i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv4i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i16_nxv4i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv4i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i16_nxv4i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv4i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i16.nxv16i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv16i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i16_nxv16i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv16i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i16_nxv16i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv16i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i64>, i64)
 declare void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
 
@@ -26186,80 +4682,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg6.nxv1i16.nxv8i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv8i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i16_nxv8i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv8i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i16_nxv8i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv8i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i16.nxv4i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv4i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i16_nxv4i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv4i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i16_nxv4i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv4i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i16>, i64)
 declare void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
 
@@ -26297,228 +4719,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg6.nxv1i16.nxv2i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv2i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i16_nxv2i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv2i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i16_nxv2i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv2i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i16.nxv8i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv8i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i16_nxv8i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv8i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i16_nxv8i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv8i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i16.nxv4i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv4i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i16_nxv4i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv4i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i16_nxv4i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv4i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i16.nxv64i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv64i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 64 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i16_nxv64i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv64i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i16_nxv64i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv64i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i16.nxv4i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv4i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i16_nxv4i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv4i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i16_nxv4i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv4i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i16.nxv8i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv8i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i16_nxv8i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv8i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i16_nxv8i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv8i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i8>, i64)
 declare void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)
 
@@ -26556,384 +4756,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg6.nxv1i16.nxv2i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv2i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i16_nxv2i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv2i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i16_nxv2i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv2i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i16.nxv8i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv8i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i16_nxv8i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv8i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i16_nxv8i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv8i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i16.nxv32i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv32i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i16_nxv32i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv32i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i16_nxv32i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv32i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i16.nxv16i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv16i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i16_nxv16i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv16i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i16_nxv16i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv16i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i16.nxv2i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv2i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i16_nxv2i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv2i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i16_nxv2i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv2i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i16.nxv2i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv2i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i16_nxv2i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i16.nxv2i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i16_nxv2i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv2i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i16.nxv16i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv16i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i16_nxv16i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i16.nxv16i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i16_nxv16i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv16i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i16.nxv32i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv32i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i16_nxv32i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i16.nxv32i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i16_nxv32i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv32i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i16.nxv4i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv4i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i16_nxv4i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i16.nxv4i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i16_nxv4i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv4i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i16.nxv16i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv16i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i16_nxv16i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i16.nxv16i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i16_nxv16i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv16i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i64>, i64)
 declare void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
 
@@ -27012,84 +4834,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg7.nxv1i16.nxv8i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv8i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i16_nxv8i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i16.nxv8i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i16_nxv8i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv8i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i16.nxv4i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv4i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i16_nxv4i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i16.nxv4i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i16_nxv4i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv4i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i16>, i64)
 declare void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
 
@@ -27129,240 +4873,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg7.nxv1i16.nxv2i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv2i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i16_nxv2i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i16.nxv2i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i16_nxv2i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv2i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i16.nxv8i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv8i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i16_nxv8i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i16.nxv8i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i16_nxv8i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv8i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i16.nxv4i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv4i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i16_nxv4i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i16.nxv4i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i16_nxv4i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv4i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i16.nxv64i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv64i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 64 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i16_nxv64i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i16.nxv64i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i16_nxv64i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv64i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i16.nxv4i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv4i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i16_nxv4i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i16.nxv4i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i16_nxv4i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv4i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i16.nxv8i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv8i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i16_nxv8i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i16.nxv8i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i16_nxv8i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv8i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i8>, i64)
 declare void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)
 
@@ -27402,404 +4912,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg7.nxv1i16.nxv2i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv2i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i16_nxv2i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i16.nxv2i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i16_nxv2i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv2i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i16.nxv8i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv8i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i16_nxv8i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i16.nxv8i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i16_nxv8i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv8i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i16.nxv32i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv32i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i16_nxv32i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i16.nxv32i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i16_nxv32i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv32i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i16.nxv16i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv16i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i16_nxv16i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i16.nxv16i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i16_nxv16i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv16i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i16.nxv2i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv2i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i16_nxv2i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i16.nxv2i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i16_nxv2i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv2i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i16.nxv2i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv2i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i16_nxv2i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i16.nxv2i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i16_nxv2i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv2i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i16.nxv16i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv16i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i16_nxv16i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i16.nxv16i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i16_nxv16i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv16i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i16.nxv32i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv32i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i16_nxv32i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i16.nxv32i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i16_nxv32i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv32i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i16.nxv4i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv4i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i16_nxv4i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i16.nxv4i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i16_nxv4i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv4i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i16.nxv16i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv16i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i16_nxv16i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i16.nxv16i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i16_nxv16i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv16i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i64>, i64)
 declare void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
 
@@ -27882,88 +4994,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg8.nxv1i16.nxv8i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv8i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i16_nxv8i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i16.nxv8i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i16_nxv8i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv8i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i16.nxv4i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv4i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i16_nxv4i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i16.nxv4i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i16_nxv4i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv4i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i16>, i64)
 declare void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
 
@@ -28005,252 +5035,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg8.nxv1i16.nxv2i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv2i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i16_nxv2i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i16.nxv2i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i16_nxv2i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv2i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i16.nxv8i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv8i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i16_nxv8i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i16.nxv8i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i16_nxv8i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv8i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i16.nxv4i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv4i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i16_nxv4i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i16.nxv4i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i16_nxv4i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv4i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i16.nxv64i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv64i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 64 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i16_nxv64i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i16.nxv64i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i16_nxv64i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv64i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i16.nxv4i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv4i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i16_nxv4i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i16.nxv4i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i16_nxv4i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv4i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i16.nxv8i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv8i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i16_nxv8i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i16.nxv8i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i16_nxv8i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv8i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i8>, i64)
 declare void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)
 
@@ -28292,521 +5076,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg8.nxv1i16.nxv2i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv2i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i16_nxv2i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i16.nxv2i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i16_nxv2i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv2i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i16.nxv8i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv8i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i16_nxv8i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i16.nxv8i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i16_nxv8i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv8i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i16.nxv32i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv32i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i16_nxv32i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i16.nxv32i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i16_nxv32i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv32i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i16.nxv16i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv16i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i16_nxv16i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i16.nxv16i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i16_nxv16i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv16i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i16.nxv2i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv2i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i16_nxv2i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i16.nxv2i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i16_nxv2i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv2i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i16.nxv2i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv2i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i16_nxv2i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i16.nxv2i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i16_nxv2i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv2i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i32.nxv16i16(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv16i16(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i32_nxv16i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i32.nxv16i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i32_nxv16i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv16i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i32.nxv32i16(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv32i16(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 32 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i32_nxv32i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i32.nxv32i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i32_nxv32i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv32i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i32.nxv4i32(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv4i32(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i32_nxv4i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i32.nxv4i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i32_nxv4i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv4i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i32.nxv16i8(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv16i8(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i32_nxv16i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i32.nxv16i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i32_nxv16i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv16i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i32.nxv1i64(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv1i64(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i32_nxv1i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i32.nxv1i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i32_nxv1i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv1i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i32.nxv1i32(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv1i32(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i32_nxv1i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i32.nxv1i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i32_nxv1i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv1i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i32.nxv8i16(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv8i16(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i32_nxv8i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i32.nxv8i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i32_nxv8i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv8i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i32.nxv4i8(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv4i8(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i32_nxv4i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i32.nxv4i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i32_nxv4i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv4i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i32.nxv1i16(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv1i16(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i32_nxv1i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i32.nxv1i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i32_nxv1i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv1i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i32>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
 
@@ -28838,186 +5107,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv2i32.nxv8i8(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv8i8(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i32_nxv8i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i32.nxv8i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i32_nxv8i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv8i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i32.nxv4i64(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv4i64(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i32_nxv4i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i32.nxv4i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i32_nxv4i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv4i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i32.nxv64i8(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv64i8(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 64 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i32_nxv64i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i32.nxv64i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i32_nxv64i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv64i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i32.nxv4i16(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv4i16(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i32_nxv4i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i32.nxv4i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i32_nxv4i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv4i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i32.nxv8i64(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv8i64(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i32_nxv8i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i32.nxv8i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i32_nxv8i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv8i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i32.nxv1i8(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv1i8(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i32_nxv1i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i32.nxv1i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i32_nxv1i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv1i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i8>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i8>, <vscale x 2 x i1>, i64)
 
@@ -29049,93 +5138,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv2i32.nxv8i32(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv8i32(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i32_nxv8i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i32.nxv8i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i32_nxv8i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv8i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i32.nxv32i8(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv32i8(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 32 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i32_nxv32i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i32.nxv32i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i32_nxv32i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv32i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i32.nxv16i32(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv16i32(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i32_nxv16i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i32.nxv16i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i32_nxv16i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv16i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i16>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64)
 
@@ -29196,285 +5198,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv2i32.nxv16i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv16i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i32_nxv16i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv16i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i32_nxv16i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv16i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i32.nxv32i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv32i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 32 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i32_nxv32i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv32i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i32_nxv32i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv32i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i32.nxv4i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv4i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i32_nxv4i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv4i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i32_nxv4i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv4i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i32.nxv16i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv16i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i32_nxv16i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv16i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i32_nxv16i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv16i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i32.nxv1i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv1i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i32_nxv1i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv1i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i32_nxv1i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv1i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i32.nxv1i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv1i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i32_nxv1i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv1i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i32_nxv1i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv1i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i32.nxv8i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv8i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i32_nxv8i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv8i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i32_nxv8i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv8i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i32.nxv4i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv4i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i32_nxv4i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv4i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i32_nxv4i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv4i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i32.nxv1i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv1i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i32_nxv1i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv1i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i32_nxv1i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv1i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i32>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
 
@@ -29506,192 +5229,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv2i32.nxv8i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv8i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i32_nxv8i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv8i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i32_nxv8i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv8i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i32.nxv4i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv4i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i32_nxv4i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv4i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i32_nxv4i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv4i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i32.nxv64i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv64i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 64 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i32_nxv64i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv64i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i32_nxv64i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv64i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i32.nxv4i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv4i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i32_nxv4i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv4i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i32_nxv4i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv4i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i32.nxv8i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv8i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i32_nxv8i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv8i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i32_nxv8i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv8i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i32.nxv1i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv1i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i32_nxv1i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv1i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i32_nxv1i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv1i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i8>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i8>, <vscale x 2 x i1>, i64)
 
@@ -29723,99 +5260,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv2i32.nxv8i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv8i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i32_nxv8i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv8i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i32_nxv8i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv8i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i32.nxv32i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv32i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 32 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i32_nxv32i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv32i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i32_nxv32i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv32i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i32.nxv16i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv16i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i32_nxv16i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv16i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i32_nxv16i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv16i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i16>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64)
 
@@ -29878,303 +5322,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv2i32.nxv16i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv16i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i32_nxv16i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i32.nxv16i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i32_nxv16i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv16i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i32.nxv32i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv32i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 32 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i32_nxv32i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i32.nxv32i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i32_nxv32i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv32i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i32.nxv4i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv4i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i32_nxv4i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i32.nxv4i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i32_nxv4i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv4i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i32.nxv16i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv16i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i32_nxv16i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i32.nxv16i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i32_nxv16i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv16i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i32.nxv1i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv1i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i32_nxv1i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i32.nxv1i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i32_nxv1i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv1i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i32.nxv1i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv1i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i32_nxv1i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i32.nxv1i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i32_nxv1i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv1i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i32.nxv8i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv8i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i32_nxv8i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i32.nxv8i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i32_nxv8i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv8i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i32.nxv4i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv4i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i32_nxv4i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i32.nxv4i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i32_nxv4i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv4i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i32.nxv1i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv1i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i32_nxv1i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i32.nxv1i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i32_nxv1i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv1i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i32>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
 
@@ -30208,204 +5355,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv2i32.nxv8i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv8i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i32_nxv8i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i32.nxv8i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i32_nxv8i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv8i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i32.nxv4i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv4i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i32_nxv4i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i32.nxv4i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i32_nxv4i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv4i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i32.nxv64i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv64i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 64 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i32_nxv64i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i32.nxv64i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i32_nxv64i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv64i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i32.nxv4i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv4i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i32_nxv4i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i32.nxv4i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i32_nxv4i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv4i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i32.nxv8i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv8i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i32_nxv8i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i32.nxv8i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i32_nxv8i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv8i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i32.nxv1i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv1i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i32_nxv1i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i32.nxv1i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i32_nxv1i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv1i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i8>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i8>, <vscale x 2 x i1>, i64)
 
@@ -30439,105 +5388,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv2i32.nxv8i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv8i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i32_nxv8i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i32.nxv8i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i32_nxv8i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv8i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i32.nxv32i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv32i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 32 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i32_nxv32i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i32.nxv32i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i32_nxv32i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv32i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i32.nxv16i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv16i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i32_nxv16i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i32.nxv16i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i32_nxv16i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv16i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i16>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64)
 
@@ -30604,321 +5454,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg5.nxv2i32.nxv16i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv16i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2i32_nxv16i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2i32.nxv16i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2i32_nxv16i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv16i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2i32.nxv32i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv32i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 32 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2i32_nxv32i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2i32.nxv32i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2i32_nxv32i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv32i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2i32.nxv4i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv4i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2i32_nxv4i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2i32.nxv4i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2i32_nxv4i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv4i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2i32.nxv16i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv16i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2i32_nxv16i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2i32.nxv16i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2i32_nxv16i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv16i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2i32.nxv1i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv1i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2i32_nxv1i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2i32.nxv1i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2i32_nxv1i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv1i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2i32.nxv1i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv1i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2i32_nxv1i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2i32.nxv1i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2i32_nxv1i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv1i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2i32.nxv8i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv8i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2i32_nxv8i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2i32.nxv8i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2i32_nxv8i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv8i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2i32.nxv4i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv4i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2i32_nxv4i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2i32.nxv4i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2i32_nxv4i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv4i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2i32.nxv1i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv1i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2i32_nxv1i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2i32.nxv1i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2i32_nxv1i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv1i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i32>, i64)
 declare void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
 
@@ -30954,216 +5489,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg5.nxv2i32.nxv8i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv8i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2i32_nxv8i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2i32.nxv8i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2i32_nxv8i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv8i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2i32.nxv4i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv4i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2i32_nxv4i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2i32.nxv4i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2i32_nxv4i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv4i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2i32.nxv64i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv64i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 64 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2i32_nxv64i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2i32.nxv64i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2i32_nxv64i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv64i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2i32.nxv4i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv4i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2i32_nxv4i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2i32.nxv4i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2i32_nxv4i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv4i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2i32.nxv8i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv8i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2i32_nxv8i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2i32.nxv8i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2i32_nxv8i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv8i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2i32.nxv1i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv1i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2i32_nxv1i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2i32.nxv1i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2i32_nxv1i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv1i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i8>, i64)
 declare void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i8>, <vscale x 2 x i1>, i64)
 
@@ -31199,111 +5524,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg5.nxv2i32.nxv8i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv8i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2i32_nxv8i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2i32.nxv8i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2i32_nxv8i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv8i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2i32.nxv32i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv32i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 32 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2i32_nxv32i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2i32.nxv32i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2i32_nxv32i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv32i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2i32.nxv16i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv16i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2i32_nxv16i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2i32.nxv16i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2i32_nxv16i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv16i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i16>, i64)
 declare void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64)
 
@@ -31374,339 +5594,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg6.nxv2i32.nxv16i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv16i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2i32_nxv16i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2i32.nxv16i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2i32_nxv16i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv16i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2i32.nxv32i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv32i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 32 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2i32_nxv32i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2i32.nxv32i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2i32_nxv32i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv32i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2i32.nxv4i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv4i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2i32_nxv4i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2i32.nxv4i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2i32_nxv4i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv4i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2i32.nxv16i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv16i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2i32_nxv16i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2i32.nxv16i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2i32_nxv16i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv16i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2i32.nxv1i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv1i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2i32_nxv1i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2i32.nxv1i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2i32_nxv1i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv1i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2i32.nxv1i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv1i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2i32_nxv1i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2i32.nxv1i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2i32_nxv1i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv1i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2i32.nxv8i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv8i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2i32_nxv8i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2i32.nxv8i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2i32_nxv8i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv8i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2i32.nxv4i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv4i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2i32_nxv4i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2i32.nxv4i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2i32_nxv4i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv4i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2i32.nxv1i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv1i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2i32_nxv1i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2i32.nxv1i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2i32_nxv1i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv1i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i32>, i64)
 declare void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
 
@@ -31744,228 +5631,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg6.nxv2i32.nxv8i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv8i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2i32_nxv8i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2i32.nxv8i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2i32_nxv8i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv8i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2i32.nxv4i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv4i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2i32_nxv4i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2i32.nxv4i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2i32_nxv4i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv4i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2i32.nxv64i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv64i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 64 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2i32_nxv64i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2i32.nxv64i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2i32_nxv64i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv64i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2i32.nxv4i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv4i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2i32_nxv4i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2i32.nxv4i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2i32_nxv4i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv4i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2i32.nxv8i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv8i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2i32_nxv8i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2i32.nxv8i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2i32_nxv8i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv8i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2i32.nxv1i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv1i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2i32_nxv1i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2i32.nxv1i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2i32_nxv1i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv1i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i8>, i64)
 declare void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i8>, <vscale x 2 x i1>, i64)
 
@@ -32003,117 +5668,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg6.nxv2i32.nxv8i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv8i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2i32_nxv8i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2i32.nxv8i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2i32_nxv8i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv8i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2i32.nxv32i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv32i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 32 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2i32_nxv32i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2i32.nxv32i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2i32_nxv32i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv32i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2i32.nxv16i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv16i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2i32_nxv16i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2i32.nxv16i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2i32_nxv16i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv16i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i16>, i64)
 declare void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64)
 
@@ -32188,357 +5742,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg7.nxv2i32.nxv16i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv16i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2i32_nxv16i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2i32.nxv16i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2i32_nxv16i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv16i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2i32.nxv32i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv32i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 32 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2i32_nxv32i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2i32.nxv32i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2i32_nxv32i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv32i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2i32.nxv4i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv4i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2i32_nxv4i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2i32.nxv4i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2i32_nxv4i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv4i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2i32.nxv16i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv16i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2i32_nxv16i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2i32.nxv16i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2i32_nxv16i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv16i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2i32.nxv1i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv1i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2i32_nxv1i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2i32.nxv1i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2i32_nxv1i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv1i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2i32.nxv1i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv1i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2i32_nxv1i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2i32.nxv1i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2i32_nxv1i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv1i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2i32.nxv8i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv8i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2i32_nxv8i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2i32.nxv8i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2i32_nxv8i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv8i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2i32.nxv4i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv4i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2i32_nxv4i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2i32.nxv4i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2i32_nxv4i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv4i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2i32.nxv1i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv1i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2i32_nxv1i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2i32.nxv1i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2i32_nxv1i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv1i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i32>, i64)
 declare void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
 
@@ -32578,240 +5781,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg7.nxv2i32.nxv8i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv8i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2i32_nxv8i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2i32.nxv8i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2i32_nxv8i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv8i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2i32.nxv4i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv4i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2i32_nxv4i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2i32.nxv4i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2i32_nxv4i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv4i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2i32.nxv64i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv64i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 64 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2i32_nxv64i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2i32.nxv64i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2i32_nxv64i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv64i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2i32.nxv4i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv4i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2i32_nxv4i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2i32.nxv4i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2i32_nxv4i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv4i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2i32.nxv8i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv8i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2i32_nxv8i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2i32.nxv8i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2i32_nxv8i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv8i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2i32.nxv1i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv1i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2i32_nxv1i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2i32.nxv1i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2i32_nxv1i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv1i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i8>, i64)
 declare void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i8>, <vscale x 2 x i1>, i64)
 
@@ -32851,123 +5820,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg7.nxv2i32.nxv8i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv8i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2i32_nxv8i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2i32.nxv8i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2i32_nxv8i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv8i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2i32.nxv32i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv32i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 32 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2i32_nxv32i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2i32.nxv32i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2i32_nxv32i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv32i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2i32.nxv16i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv16i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2i32_nxv16i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2i32.nxv16i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2i32_nxv16i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv16i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i16>, i64)
 declare void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64)
 
@@ -33046,375 +5898,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg8.nxv2i32.nxv16i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv16i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2i32_nxv16i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2i32.nxv16i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2i32_nxv16i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv16i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2i32.nxv32i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv32i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 32 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2i32_nxv32i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2i32.nxv32i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2i32_nxv32i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv32i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2i32.nxv4i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv4i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2i32_nxv4i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2i32.nxv4i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2i32_nxv4i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv4i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2i32.nxv16i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv16i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2i32_nxv16i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2i32.nxv16i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2i32_nxv16i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv16i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2i32.nxv1i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv1i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2i32_nxv1i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2i32.nxv1i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2i32_nxv1i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv1i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2i32.nxv1i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv1i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2i32_nxv1i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2i32.nxv1i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2i32_nxv1i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv1i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2i32.nxv8i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv8i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2i32_nxv8i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2i32.nxv8i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2i32_nxv8i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv8i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2i32.nxv4i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv4i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2i32_nxv4i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2i32.nxv4i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2i32_nxv4i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv4i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2i32.nxv1i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv1i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2i32_nxv1i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2i32.nxv1i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2i32_nxv1i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv1i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i32>, i64)
 declare void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
 
@@ -33456,252 +5939,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg8.nxv2i32.nxv8i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv8i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2i32_nxv8i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2i32.nxv8i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2i32_nxv8i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv8i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2i32.nxv4i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv4i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2i32_nxv4i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2i32.nxv4i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2i32_nxv4i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv4i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2i32.nxv64i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv64i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 64 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2i32_nxv64i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2i32.nxv64i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2i32_nxv64i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv64i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2i32.nxv4i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv4i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2i32_nxv4i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2i32.nxv4i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2i32_nxv4i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv4i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2i32.nxv8i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv8i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2i32_nxv8i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2i32.nxv8i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2i32_nxv8i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv8i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2i32.nxv1i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv1i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2i32_nxv1i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2i32.nxv1i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2i32_nxv1i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv1i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i8>, i64)
 declare void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i8>, <vscale x 2 x i1>, i64)
 
@@ -33743,129 +5980,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg8.nxv2i32.nxv8i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv8i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2i32_nxv8i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2i32.nxv8i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2i32_nxv8i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv8i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2i32.nxv32i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv32i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 32 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2i32_nxv32i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2i32.nxv32i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2i32_nxv32i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv32i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2i32.nxv16i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv16i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2i32_nxv16i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2i32.nxv16i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2i32_nxv16i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv16i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i16>, i64)
 declare void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64)
 
@@ -33948,184 +6062,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv8i8.nxv16i16(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv16i16(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8i8_nxv16i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8i8.nxv16i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8i8_nxv16i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv16i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8i8.nxv32i16(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv32i16(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 32 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8i8_nxv32i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8i8.nxv32i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8i8_nxv32i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv32i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8i8.nxv4i32(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv4i32(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8i8_nxv4i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8i8.nxv4i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8i8_nxv4i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv4i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8i8.nxv16i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv16i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8i8_nxv16i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8i8.nxv16i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8i8_nxv16i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv16i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8i8.nxv1i64(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv1i64(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i64>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8i8_nxv1i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8i8.nxv1i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8i8_nxv1i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv1i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8i8.nxv1i32(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv1i32(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8i8_nxv1i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8i8.nxv1i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8i8_nxv1i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv1i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i16>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i16>, <vscale x 8 x i1>, i64)
 
@@ -34155,99 +6091,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv8i8.nxv4i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv4i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8i8_nxv4i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8i8.nxv4i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8i8_nxv4i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv4i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8i8.nxv1i16(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv1i16(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8i8_nxv1i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8i8.nxv1i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8i8_nxv1i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv1i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8i8.nxv2i32(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv2i32(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8i8_nxv2i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8i8.nxv2i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8i8_nxv2i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv2i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i8>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i8>, <vscale x 8 x i1>, i64)
 
@@ -34279,95 +6122,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv8i8.nxv4i64(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv4i64(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i64>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8i8_nxv4i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8i8.nxv4i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8i8_nxv4i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv4i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8i8.nxv64i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv64i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 64 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8i8_nxv64i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8i8.nxv64i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8i8_nxv64i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv64i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8i8.nxv4i16(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv4i16(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8i8_nxv4i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8i8.nxv4i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8i8_nxv4i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv4i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i64(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i64>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i64(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i64>, <vscale x 8 x i1>, i64)
 
@@ -34397,68 +6151,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv8i8.nxv1i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv1i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8i8_nxv1i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8i8.nxv1i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8i8_nxv1i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv1i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8i8.nxv2i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv2i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8i8_nxv2i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8i8.nxv2i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8i8_nxv2i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv2i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i32>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i32>, <vscale x 8 x i1>, i64)
 
@@ -34488,310 +6180,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv8i8.nxv32i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv32i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 32 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8i8_nxv32i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8i8.nxv32i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8i8_nxv32i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv32i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8i8.nxv16i32(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv16i32(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8i8_nxv16i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8i8.nxv16i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8i8_nxv16i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv16i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8i8.nxv2i16(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv2i16(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8i8_nxv2i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8i8.nxv2i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8i8_nxv2i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv2i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8i8.nxv2i64(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv2i64(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i64>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8i8_nxv2i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8i8.nxv2i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8i8_nxv2i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv2i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv8i8.nxv16i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv16i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg3_nxv8i8_nxv16i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv16i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv8i8_nxv16i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv16i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv8i8.nxv32i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv32i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 32 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg3_nxv8i8_nxv32i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv32i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv8i8_nxv32i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv32i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv8i8.nxv4i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv4i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg3_nxv8i8_nxv4i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv4i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv8i8_nxv4i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv4i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv8i8.nxv16i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv16i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg3_nxv8i8_nxv16i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv16i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv8i8_nxv16i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv16i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv8i8.nxv1i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv1i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i64>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg3_nxv8i8_nxv1i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv1i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv8i8_nxv1i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv1i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv8i8.nxv1i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv1i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg3_nxv8i8_nxv1i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv1i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv8i8_nxv1i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv1i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i16>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i16>, <vscale x 8 x i1>, i64)
 
@@ -34823,99 +6211,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv8i8.nxv4i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv4i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg3_nxv8i8_nxv4i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv4i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv8i8_nxv4i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv4i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv8i8.nxv1i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv1i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg3_nxv8i8_nxv1i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv1i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv8i8_nxv1i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv1i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv8i8.nxv2i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv2i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg3_nxv8i8_nxv2i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv2i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv8i8_nxv2i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv2i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i8>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i8>, <vscale x 8 x i1>, i64)
 
@@ -34947,99 +6242,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv8i8.nxv4i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv4i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i64>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg3_nxv8i8_nxv4i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv4i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv8i8_nxv4i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv4i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv8i8.nxv64i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv64i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 64 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg3_nxv8i8_nxv64i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv64i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv8i8_nxv64i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv64i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv8i8.nxv4i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv4i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg3_nxv8i8_nxv4i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv4i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv8i8_nxv4i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv4i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i64>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i64>, <vscale x 8 x i1>, i64)
 
@@ -35071,68 +6273,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv8i8.nxv1i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv1i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg3_nxv8i8_nxv1i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv1i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv8i8_nxv1i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv1i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv8i8.nxv2i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv2i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg3_nxv8i8_nxv2i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv2i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv8i8_nxv2i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv2i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i32>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i32>, <vscale x 8 x i1>, i64)
 
@@ -35164,328 +6304,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv8i8.nxv32i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv32i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 32 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg3_nxv8i8_nxv32i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv32i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv8i8_nxv32i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv32i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv8i8.nxv16i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv16i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg3_nxv8i8_nxv16i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv16i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv8i8_nxv16i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv16i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv8i8.nxv2i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv2i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg3_nxv8i8_nxv2i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv2i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv8i8_nxv2i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv2i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv8i8.nxv2i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv2i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i64>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg3_nxv8i8_nxv2i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv2i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv8i8_nxv2i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv2i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv8i8.nxv16i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv16i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg4_nxv8i8_nxv16i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv8i8.nxv16i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv8i8_nxv16i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv16i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv8i8.nxv32i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv32i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 32 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg4_nxv8i8_nxv32i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv8i8.nxv32i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv8i8_nxv32i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv32i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv8i8.nxv4i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv4i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg4_nxv8i8_nxv4i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv8i8.nxv4i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv8i8_nxv4i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv4i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv8i8.nxv16i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv16i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg4_nxv8i8_nxv16i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv8i8.nxv16i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv8i8_nxv16i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv16i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv8i8.nxv1i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv1i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i64>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg4_nxv8i8_nxv1i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv8i8.nxv1i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv8i8_nxv1i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv1i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv8i8.nxv1i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv1i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg4_nxv8i8_nxv1i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv8i8.nxv1i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv8i8_nxv1i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv1i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i16>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i16>, <vscale x 8 x i1>, i64)
 
@@ -35519,105 +6337,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv8i8.nxv4i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv4i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg4_nxv8i8_nxv4i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv8i8.nxv4i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv8i8_nxv4i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv4i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv8i8.nxv1i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv1i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg4_nxv8i8_nxv1i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv8i8.nxv1i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv8i8_nxv1i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv1i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv8i8.nxv2i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv2i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg4_nxv8i8_nxv2i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv8i8.nxv2i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv8i8_nxv2i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv2i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i8>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i8>, <vscale x 8 x i1>, i64)
 
@@ -35651,105 +6370,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv8i8.nxv4i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv4i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i64>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg4_nxv8i8_nxv4i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv8i8.nxv4i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv8i8_nxv4i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv4i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv8i8.nxv64i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv64i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 64 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg4_nxv8i8_nxv64i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv8i8.nxv64i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv8i8_nxv64i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv64i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv8i8.nxv4i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv4i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg4_nxv8i8_nxv4i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv8i8.nxv4i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv8i8_nxv4i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv4i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i64>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i64>, <vscale x 8 x i1>, i64)
 
@@ -35783,72 +6403,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv8i8.nxv1i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv1i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg4_nxv8i8_nxv1i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv8i8.nxv1i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv8i8_nxv1i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv1i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv8i8.nxv2i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv2i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg4_nxv8i8_nxv2i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv8i8.nxv2i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv8i8_nxv2i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv2i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i32>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i32>, <vscale x 8 x i1>, i64)
 
@@ -35882,348 +6436,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv8i8.nxv32i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv32i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 32 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg4_nxv8i8_nxv32i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv8i8.nxv32i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv8i8_nxv32i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv32i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv8i8.nxv16i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv16i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg4_nxv8i8_nxv16i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv8i8.nxv16i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv8i8_nxv16i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv16i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv8i8.nxv2i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv2i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg4_nxv8i8_nxv2i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv8i8.nxv2i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv8i8_nxv2i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv2i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv8i8.nxv2i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv2i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i64>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg4_nxv8i8_nxv2i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv8i8.nxv2i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv8i8_nxv2i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv2i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv8i8.nxv16i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv16i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg5_nxv8i8_nxv16i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv16i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv8i8_nxv16i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv16i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv8i8.nxv32i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv32i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 32 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg5_nxv8i8_nxv32i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv32i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv8i8_nxv32i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv32i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv8i8.nxv4i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv4i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg5_nxv8i8_nxv4i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv4i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv8i8_nxv4i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv4i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv8i8.nxv16i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv16i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg5_nxv8i8_nxv16i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv16i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv8i8_nxv16i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv16i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv8i8.nxv1i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv1i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i64>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg5_nxv8i8_nxv1i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv1i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv8i8_nxv1i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv1i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv8i8.nxv1i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv1i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg5_nxv8i8_nxv1i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv1i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv8i8_nxv1i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv1i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i16>, i64)
 declare void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i16>, <vscale x 8 x i1>, i64)
 
@@ -36259,111 +6471,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg5.nxv8i8.nxv4i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv4i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg5_nxv8i8_nxv4i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv4i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv8i8_nxv4i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv4i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv8i8.nxv1i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv1i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg5_nxv8i8_nxv1i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv1i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv8i8_nxv1i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv1i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv8i8.nxv2i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv2i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg5_nxv8i8_nxv2i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv2i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv8i8_nxv2i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv2i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i8>, i64)
 declare void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i8>, <vscale x 8 x i1>, i64)
 
@@ -36399,111 +6506,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg5.nxv8i8.nxv4i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv4i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i64>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg5_nxv8i8_nxv4i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv4i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv8i8_nxv4i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv4i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv8i8.nxv64i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv64i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 64 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg5_nxv8i8_nxv64i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv64i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv8i8_nxv64i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv64i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv8i8.nxv4i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv4i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg5_nxv8i8_nxv4i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv4i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv8i8_nxv4i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv4i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i64>, i64)
 declare void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i64>, <vscale x 8 x i1>, i64)
 
@@ -36539,76 +6541,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg5.nxv8i8.nxv1i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv1i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg5_nxv8i8_nxv1i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv1i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv8i8_nxv1i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv1i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv8i8.nxv2i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv2i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg5_nxv8i8_nxv2i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv2i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv8i8_nxv2i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv2i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i32>, i64)
 declare void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i32>, <vscale x 8 x i1>, i64)
 
@@ -36644,368 +6576,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg5.nxv8i8.nxv32i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv32i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 32 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg5_nxv8i8_nxv32i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv32i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv8i8_nxv32i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv32i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv8i8.nxv16i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv16i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg5_nxv8i8_nxv16i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv16i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv8i8_nxv16i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv16i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv8i8.nxv2i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv2i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg5_nxv8i8_nxv2i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv2i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv8i8_nxv2i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv2i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv8i8.nxv2i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv2i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i64>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg5_nxv8i8_nxv2i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv2i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv8i8_nxv2i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv2i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv8i8.nxv16i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv16i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg6_nxv8i8_nxv16i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv8i8.nxv16i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv8i8_nxv16i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv16i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv8i8.nxv32i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv32i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 32 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg6_nxv8i8_nxv32i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv8i8.nxv32i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv8i8_nxv32i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv32i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv8i8.nxv4i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv4i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg6_nxv8i8_nxv4i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv8i8.nxv4i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv8i8_nxv4i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv4i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv8i8.nxv16i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv16i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg6_nxv8i8_nxv16i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv8i8.nxv16i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv8i8_nxv16i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv16i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv8i8.nxv1i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv1i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i64>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg6_nxv8i8_nxv1i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv8i8.nxv1i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv8i8_nxv1i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv1i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv8i8.nxv1i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv1i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg6_nxv8i8_nxv1i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv8i8.nxv1i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv8i8_nxv1i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv1i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i16>, i64)
 declare void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i16>, <vscale x 8 x i1>, i64)
 
@@ -37043,117 +6613,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg6.nxv8i8.nxv4i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv4i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg6_nxv8i8_nxv4i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv8i8.nxv4i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv8i8_nxv4i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv4i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv8i8.nxv1i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv1i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg6_nxv8i8_nxv1i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv8i8.nxv1i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv8i8_nxv1i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv1i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv8i8.nxv2i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv2i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg6_nxv8i8_nxv2i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv8i8.nxv2i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv8i8_nxv2i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv2i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i8>, i64)
 declare void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i8>, <vscale x 8 x i1>, i64)
 
@@ -37191,117 +6650,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg6.nxv8i8.nxv4i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv4i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i64>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg6_nxv8i8_nxv4i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv8i8.nxv4i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv8i8_nxv4i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv4i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv8i8.nxv64i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv64i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 64 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg6_nxv8i8_nxv64i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv8i8.nxv64i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv8i8_nxv64i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv64i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv8i8.nxv4i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv4i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg6_nxv8i8_nxv4i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv8i8.nxv4i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv8i8_nxv4i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv4i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i64>, i64)
 declare void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i64>, <vscale x 8 x i1>, i64)
 
@@ -37339,80 +6687,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg6.nxv8i8.nxv1i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv1i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg6_nxv8i8_nxv1i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv8i8.nxv1i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv8i8_nxv1i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv1i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv8i8.nxv2i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv2i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg6_nxv8i8_nxv2i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv8i8.nxv2i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv8i8_nxv2i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv2i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i32>, i64)
 declare void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i32>, <vscale x 8 x i1>, i64)
 
@@ -37450,388 +6724,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg6.nxv8i8.nxv32i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv32i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 32 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg6_nxv8i8_nxv32i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv8i8.nxv32i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv8i8_nxv32i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv32i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv8i8.nxv16i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv16i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg6_nxv8i8_nxv16i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv8i8.nxv16i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv8i8_nxv16i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv16i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv8i8.nxv2i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv2i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg6_nxv8i8_nxv2i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv8i8.nxv2i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv8i8_nxv2i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv2i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv8i8.nxv2i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv2i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i64>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg6_nxv8i8_nxv2i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv8i8.nxv2i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv8i8_nxv2i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv2i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv8i8.nxv16i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv16i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg7_nxv8i8_nxv16i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv8i8.nxv16i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv8i8_nxv16i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv16i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv8i8.nxv32i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv32i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 32 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg7_nxv8i8_nxv32i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv8i8.nxv32i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv8i8_nxv32i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv32i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv8i8.nxv4i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv4i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg7_nxv8i8_nxv4i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv8i8.nxv4i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv8i8_nxv4i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv4i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv8i8.nxv16i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv16i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg7_nxv8i8_nxv16i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv8i8.nxv16i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv8i8_nxv16i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv16i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv8i8.nxv1i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv1i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i64>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg7_nxv8i8_nxv1i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv8i8.nxv1i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv8i8_nxv1i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv1i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv8i8.nxv1i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv1i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg7_nxv8i8_nxv1i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv8i8.nxv1i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv8i8_nxv1i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv1i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i16>, i64)
 declare void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i16>, <vscale x 8 x i1>, i64)
 
@@ -37871,123 +6763,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg7.nxv8i8.nxv4i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv4i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg7_nxv8i8_nxv4i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv8i8.nxv4i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv8i8_nxv4i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv4i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv8i8.nxv1i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv1i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg7_nxv8i8_nxv1i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv8i8.nxv1i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv8i8_nxv1i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv1i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv8i8.nxv2i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv2i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg7_nxv8i8_nxv2i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv8i8.nxv2i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv8i8_nxv2i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv2i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i8>, i64)
 declare void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i8>, <vscale x 8 x i1>, i64)
 
@@ -38027,123 +6802,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg7.nxv8i8.nxv4i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv4i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i64>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg7_nxv8i8_nxv4i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv8i8.nxv4i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv8i8_nxv4i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv4i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv8i8.nxv64i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv64i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 64 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg7_nxv8i8_nxv64i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv8i8.nxv64i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv8i8_nxv64i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv64i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv8i8.nxv4i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv4i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg7_nxv8i8_nxv4i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv8i8.nxv4i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv8i8_nxv4i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv4i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i64>, i64)
 declare void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i64>, <vscale x 8 x i1>, i64)
 
@@ -38183,84 +6841,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg7.nxv8i8.nxv1i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv1i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg7_nxv8i8_nxv1i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv8i8.nxv1i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv8i8_nxv1i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv1i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv8i8.nxv2i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv2i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg7_nxv8i8_nxv2i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv8i8.nxv2i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv8i8_nxv2i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv2i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i32>, i64)
 declare void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i32>, <vscale x 8 x i1>, i64)
 
@@ -38300,408 +6880,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg7.nxv8i8.nxv32i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv32i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 32 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg7_nxv8i8_nxv32i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv8i8.nxv32i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv8i8_nxv32i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv32i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv8i8.nxv16i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv16i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg7_nxv8i8_nxv16i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv8i8.nxv16i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv8i8_nxv16i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv16i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv8i8.nxv2i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv2i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg7_nxv8i8_nxv2i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv8i8.nxv2i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv8i8_nxv2i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv2i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv8i8.nxv2i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv2i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i64>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg7_nxv8i8_nxv2i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv8i8.nxv2i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv8i8_nxv2i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv2i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv8i8.nxv16i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv16i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg8_nxv8i8_nxv16i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv8i8.nxv16i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv8i8_nxv16i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv16i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv8i8.nxv32i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv32i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 32 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg8_nxv8i8_nxv32i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv8i8.nxv32i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv8i8_nxv32i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv32i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv8i8.nxv4i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv4i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg8_nxv8i8_nxv4i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv8i8.nxv4i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv8i8_nxv4i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv4i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv8i8.nxv16i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv16i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg8_nxv8i8_nxv16i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv8i8.nxv16i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv8i8_nxv16i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv16i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv8i8.nxv1i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv1i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i64>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg8_nxv8i8_nxv1i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv8i8.nxv1i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv8i8_nxv1i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv1i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv8i8.nxv1i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv1i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg8_nxv8i8_nxv1i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv8i8.nxv1i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv8i8_nxv1i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv1i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i16>, i64)
 declare void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i16>, <vscale x 8 x i1>, i64)
 
@@ -38743,129 +6921,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg8.nxv8i8.nxv4i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv4i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg8_nxv8i8_nxv4i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv8i8.nxv4i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv8i8_nxv4i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv4i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv8i8.nxv1i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv1i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg8_nxv8i8_nxv1i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv8i8.nxv1i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv8i8_nxv1i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv1i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv8i8.nxv2i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv2i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg8_nxv8i8_nxv2i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv8i8.nxv2i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv8i8_nxv2i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv2i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i8>, i64)
 declare void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i8>, <vscale x 8 x i1>, i64)
 
@@ -38907,129 +6962,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg8.nxv8i8.nxv4i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv4i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i64>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg8_nxv8i8_nxv4i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv8i8.nxv4i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv8i8_nxv4i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv4i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv8i8.nxv64i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv64i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 64 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg8_nxv8i8_nxv64i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv8i8.nxv64i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv8i8_nxv64i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv64i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv8i8.nxv4i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv4i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg8_nxv8i8_nxv4i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv8i8.nxv4i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv8i8_nxv4i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv4i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i64>, i64)
 declare void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i64>, <vscale x 8 x i1>, i64)
 
@@ -39071,88 +7003,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg8.nxv8i8.nxv1i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv1i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg8_nxv8i8_nxv1i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv8i8.nxv1i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv8i8_nxv1i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv1i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv8i8.nxv2i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv2i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg8_nxv8i8_nxv2i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv8i8.nxv2i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv8i8_nxv2i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv2i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i32>, i64)
 declare void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i32>, <vscale x 8 x i1>, i64)
 
@@ -39194,230 +7044,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg8.nxv8i8.nxv32i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv32i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 32 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg8_nxv8i8_nxv32i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv8i8.nxv32i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv8i8_nxv32i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv32i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv8i8.nxv16i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv16i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg8_nxv8i8_nxv16i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv8i8.nxv16i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv8i8_nxv16i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv16i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv8i8.nxv2i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv2i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg8_nxv8i8_nxv2i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv8i8.nxv2i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv8i8_nxv2i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv2i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv8i8.nxv2i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv2i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i64>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg8_nxv8i8_nxv2i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv8i8.nxv2i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv8i8_nxv2i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv2i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4i64.nxv16i16(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv16i16(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 16 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i64_nxv16i16(<vscale x 4 x i64> %val, i64* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v28, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v28
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i64.nxv16i16(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i64_nxv16i16(<vscale x 4 x i64> %val, i64* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v28, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v28, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv16i16(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4i64.nxv32i16(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv32i16(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 32 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i64_nxv32i16(<vscale x 4 x i64> %val, i64* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i64.nxv32i16(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i64_nxv32i16(<vscale x 4 x i64> %val, i64* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv32i16(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i32(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 4 x i32>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i32(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 4 x i32>, <vscale x 4 x i1>, i64)
 
@@ -39449,130 +7075,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv4i64.nxv16i8(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv16i8(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 16 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i64_nxv16i8(<vscale x 4 x i64> %val, i64* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv2r.v v26, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v26
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i64.nxv16i8(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i64_nxv16i8(<vscale x 4 x i64> %val, i64* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv2r.v v26, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v26, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv16i8(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4i64.nxv1i64(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv1i64(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 1 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i64_nxv1i64(<vscale x 4 x i64> %val, i64* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i64.nxv1i64(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i64_nxv1i64(<vscale x 4 x i64> %val, i64* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv1i64(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4i64.nxv1i32(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv1i32(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 1 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i64_nxv1i32(<vscale x 4 x i64> %val, i64* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i64.nxv1i32(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i64_nxv1i32(<vscale x 4 x i64> %val, i64* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv1i32(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4i64.nxv8i16(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv8i16(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 8 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i64_nxv8i16(<vscale x 4 x i64> %val, i64* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv2r.v v26, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v26
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i64.nxv8i16(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i64_nxv8i16(<vscale x 4 x i64> %val, i64* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv2r.v v26, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v26, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv8i16(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i8(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 4 x i8>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i8(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 4 x i8>, <vscale x 4 x i1>, i64)
 
@@ -39604,99 +7106,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv4i64.nxv1i16(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv1i16(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 1 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i64_nxv1i16(<vscale x 4 x i64> %val, i64* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i64.nxv1i16(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i64_nxv1i16(<vscale x 4 x i64> %val, i64* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv1i16(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4i64.nxv2i32(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv2i32(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 2 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i64_nxv2i32(<vscale x 4 x i64> %val, i64* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i64.nxv2i32(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i64_nxv2i32(<vscale x 4 x i64> %val, i64* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv2i32(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4i64.nxv8i8(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv8i8(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 8 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i64_nxv8i8(<vscale x 4 x i64> %val, i64* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i64.nxv8i8(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i64_nxv8i8(<vscale x 4 x i64> %val, i64* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv8i8(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i64(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 4 x i64>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i64(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 4 x i64>, <vscale x 4 x i1>, i64)
 
@@ -39728,35 +7137,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv4i64.nxv64i8(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv64i8(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 64 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i64_nxv64i8(<vscale x 4 x i64> %val, i64* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i64.nxv64i8(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i64_nxv64i8(<vscale x 4 x i64> %val, i64* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv64i8(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i16(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 4 x i16>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i16(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 4 x i16>, <vscale x 4 x i1>, i64)
 
@@ -39788,308 +7168,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv4i64.nxv8i64(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv8i64(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 8 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i64_nxv8i64(<vscale x 4 x i64> %val, i64* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i64.nxv8i64(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i64_nxv8i64(<vscale x 4 x i64> %val, i64* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv8i64(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4i64.nxv1i8(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv1i8(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 1 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i64_nxv1i8(<vscale x 4 x i64> %val, i64* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i64.nxv1i8(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i64_nxv1i8(<vscale x 4 x i64> %val, i64* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv1i8(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4i64.nxv2i8(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv2i8(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 2 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i64_nxv2i8(<vscale x 4 x i64> %val, i64* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i64.nxv2i8(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i64_nxv2i8(<vscale x 4 x i64> %val, i64* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv2i8(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4i64.nxv8i32(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv8i32(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 8 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i64_nxv8i32(<vscale x 4 x i64> %val, i64* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v28, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v28
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i64.nxv8i32(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i64_nxv8i32(<vscale x 4 x i64> %val, i64* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v28, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v28, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv8i32(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4i64.nxv32i8(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv32i8(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 32 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i64_nxv32i8(<vscale x 4 x i64> %val, i64* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v28, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v28
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i64.nxv32i8(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i64_nxv32i8(<vscale x 4 x i64> %val, i64* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v28, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v28, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv32i8(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4i64.nxv16i32(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv16i32(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 16 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i64_nxv16i32(<vscale x 4 x i64> %val, i64* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i64.nxv16i32(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i64_nxv16i32(<vscale x 4 x i64> %val, i64* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv16i32(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4i64.nxv2i16(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv2i16(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 2 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i64_nxv2i16(<vscale x 4 x i64> %val, i64* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i64.nxv2i16(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i64_nxv2i16(<vscale x 4 x i64> %val, i64* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv2i16(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4i64.nxv2i64(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv2i64(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 2 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i64_nxv2i64(<vscale x 4 x i64> %val, i64* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv2r.v v26, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v26
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i64.nxv2i64(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i64_nxv2i64(<vscale x 4 x i64> %val, i64* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv2r.v v26, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v26, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv2i64(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4i16.nxv16i16(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv16i16(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i16_nxv16i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i16.nxv16i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i16_nxv16i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv16i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4i16.nxv32i16(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv32i16(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i16_nxv32i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i16.nxv32i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i16_nxv32i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv32i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i32>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i32>, <vscale x 4 x i1>, i64)
 
@@ -40119,126 +7197,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv4i16.nxv16i8(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv16i8(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i16_nxv16i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i16.nxv16i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i16_nxv16i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv16i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4i16.nxv1i64(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv1i64(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i16_nxv1i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i16.nxv1i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i16_nxv1i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv1i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4i16.nxv1i32(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv1i32(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i16_nxv1i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i16.nxv1i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i16_nxv1i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv1i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4i16.nxv8i16(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv8i16(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i16_nxv8i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i16.nxv8i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i16_nxv8i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv8i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i8>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i8>, <vscale x 4 x i1>, i64)
 
@@ -40270,99 +7228,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv4i16.nxv1i16(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv1i16(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i16_nxv1i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i16.nxv1i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i16_nxv1i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv1i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4i16.nxv2i32(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv2i32(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i16_nxv2i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i16.nxv2i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i16_nxv2i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv2i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4i16.nxv8i8(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv8i8(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i16_nxv8i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i16.nxv8i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i16_nxv8i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv8i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i64(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i64>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i64(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i64>, <vscale x 4 x i1>, i64)
 
@@ -40392,35 +7257,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv4i16.nxv64i8(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv64i8(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 64 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i16_nxv64i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i16.nxv64i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i16_nxv64i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv64i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i16>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i16>, <vscale x 4 x i1>, i64)
 
@@ -40452,306 +7288,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv4i16.nxv8i64(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv8i64(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i16_nxv8i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i16.nxv8i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i16_nxv8i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv8i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4i16.nxv1i8(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv1i8(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i16_nxv1i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i16.nxv1i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i16_nxv1i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv1i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4i16.nxv2i8(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv2i8(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i16_nxv2i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i16.nxv2i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i16_nxv2i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv2i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4i16.nxv8i32(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv8i32(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i16_nxv8i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i16.nxv8i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i16_nxv8i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv8i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4i16.nxv32i8(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv32i8(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i16_nxv32i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i16.nxv32i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i16_nxv32i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv32i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4i16.nxv16i32(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv16i32(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i16_nxv16i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i16.nxv16i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i16_nxv16i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv16i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4i16.nxv2i16(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv2i16(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i16_nxv2i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i16.nxv2i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i16_nxv2i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv2i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4i16.nxv2i64(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv2i64(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4i16_nxv2i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4i16.nxv2i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4i16_nxv2i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv2i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4i16.nxv16i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv16i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4i16_nxv16i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv16i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4i16_nxv16i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv16i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4i16.nxv32i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv32i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4i16_nxv32i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv32i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4i16_nxv32i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv32i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i32>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i32>, <vscale x 4 x i1>, i64)
 
@@ -40783,130 +7319,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv4i16.nxv16i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv16i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4i16_nxv16i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv16i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4i16_nxv16i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv16i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4i16.nxv1i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv1i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4i16_nxv1i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv1i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4i16_nxv1i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv1i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4i16.nxv1i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv1i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4i16_nxv1i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv1i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4i16_nxv1i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv1i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4i16.nxv8i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv8i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4i16_nxv8i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv8i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4i16_nxv8i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv8i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i8>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i8>, <vscale x 4 x i1>, i64)
 
@@ -40938,99 +7350,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv4i16.nxv1i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv1i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4i16_nxv1i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv1i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4i16_nxv1i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv1i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4i16.nxv2i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv2i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4i16_nxv2i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv2i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4i16_nxv2i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv2i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4i16.nxv8i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv8i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4i16_nxv8i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv8i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4i16_nxv8i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv8i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i64>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i64>, <vscale x 4 x i1>, i64)
 
@@ -41062,37 +7381,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv4i16.nxv64i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv64i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 64 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4i16_nxv64i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv64i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4i16_nxv64i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv64i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i16>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i16>, <vscale x 4 x i1>, i64)
 
@@ -41124,320 +7412,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv4i16.nxv8i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv8i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4i16_nxv8i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv8i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4i16_nxv8i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv8i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4i16.nxv1i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv1i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4i16_nxv1i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv1i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4i16_nxv1i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv1i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4i16.nxv2i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv2i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4i16_nxv2i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv2i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4i16_nxv2i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv2i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4i16.nxv8i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv8i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4i16_nxv8i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv8i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4i16_nxv8i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv8i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4i16.nxv32i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv32i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4i16_nxv32i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv32i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4i16_nxv32i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv32i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4i16.nxv16i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv16i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4i16_nxv16i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv16i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4i16_nxv16i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv16i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4i16.nxv2i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv2i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4i16_nxv2i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv2i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4i16_nxv2i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv2i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4i16.nxv2i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv2i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4i16_nxv2i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv2i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4i16_nxv2i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv2i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4i16.nxv16i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv16i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4i16_nxv16i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4i16.nxv16i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4i16_nxv16i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv16i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4i16.nxv32i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv32i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4i16_nxv32i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4i16.nxv32i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4i16_nxv32i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv32i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i32>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i32>, <vscale x 4 x i1>, i64)
 
@@ -41471,138 +7445,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv4i16.nxv16i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv16i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4i16_nxv16i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4i16.nxv16i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4i16_nxv16i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv16i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4i16.nxv1i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv1i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4i16_nxv1i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4i16.nxv1i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4i16_nxv1i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv1i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4i16.nxv1i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv1i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4i16_nxv1i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4i16.nxv1i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4i16_nxv1i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv1i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4i16.nxv8i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv8i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4i16_nxv8i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4i16.nxv8i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4i16_nxv8i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv8i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i8>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i8>, <vscale x 4 x i1>, i64)
 
@@ -41636,105 +7478,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv4i16.nxv1i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv1i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4i16_nxv1i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4i16.nxv1i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4i16_nxv1i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv1i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4i16.nxv2i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv2i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4i16_nxv2i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4i16.nxv2i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4i16_nxv2i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv2i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4i16.nxv8i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv8i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4i16_nxv8i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4i16.nxv8i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4i16_nxv8i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv8i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i64>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i64>, <vscale x 4 x i1>, i64)
 
@@ -41768,39 +7511,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv4i16.nxv64i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv64i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 64 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4i16_nxv64i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4i16.nxv64i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4i16_nxv64i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv64i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i16>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i16>, <vscale x 4 x i1>, i64)
 
@@ -41834,340 +7544,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv4i16.nxv8i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv8i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4i16_nxv8i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4i16.nxv8i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4i16_nxv8i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv8i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4i16.nxv1i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv1i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4i16_nxv1i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4i16.nxv1i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4i16_nxv1i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv1i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4i16.nxv2i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv2i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4i16_nxv2i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4i16.nxv2i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4i16_nxv2i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv2i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4i16.nxv8i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv8i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4i16_nxv8i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4i16.nxv8i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4i16_nxv8i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv8i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4i16.nxv32i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv32i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4i16_nxv32i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4i16.nxv32i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4i16_nxv32i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv32i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4i16.nxv16i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv16i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4i16_nxv16i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4i16.nxv16i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4i16_nxv16i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv16i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4i16.nxv2i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv2i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4i16_nxv2i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4i16.nxv2i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4i16_nxv2i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv2i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4i16.nxv2i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv2i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4i16_nxv2i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4i16.nxv2i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4i16_nxv2i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv2i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv4i16.nxv16i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv16i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg5_nxv4i16_nxv16i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv16i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv4i16_nxv16i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv16i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv4i16.nxv32i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv32i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg5_nxv4i16_nxv32i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv32i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv4i16_nxv32i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv32i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i32>, i64)
 declare void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i32>, <vscale x 4 x i1>, i64)
 
@@ -42203,146 +7579,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg5.nxv4i16.nxv16i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv16i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg5_nxv4i16_nxv16i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv16i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv4i16_nxv16i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv16i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv4i16.nxv1i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv1i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg5_nxv4i16_nxv1i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv1i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv4i16_nxv1i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv1i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv4i16.nxv1i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv1i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg5_nxv4i16_nxv1i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv1i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv4i16_nxv1i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv1i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv4i16.nxv8i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv8i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg5_nxv4i16_nxv8i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv8i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv4i16_nxv8i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv8i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i8>, i64)
 declare void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i8>, <vscale x 4 x i1>, i64)
 
@@ -42378,111 +7614,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg5.nxv4i16.nxv1i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv1i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg5_nxv4i16_nxv1i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv1i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv4i16_nxv1i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv1i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv4i16.nxv2i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv2i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg5_nxv4i16_nxv2i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv2i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv4i16_nxv2i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv2i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv4i16.nxv8i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv8i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg5_nxv4i16_nxv8i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv8i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv4i16_nxv8i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv8i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i64>, i64)
 declare void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i64>, <vscale x 4 x i1>, i64)
 
@@ -42518,41 +7649,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg5.nxv4i16.nxv64i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv64i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 64 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg5_nxv4i16_nxv64i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv64i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv4i16_nxv64i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv64i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i16>, i64)
 declare void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i16>, <vscale x 4 x i1>, i64)
 
@@ -42588,360 +7684,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg5.nxv4i16.nxv8i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv8i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg5_nxv4i16_nxv8i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv8i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv4i16_nxv8i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv8i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv4i16.nxv1i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv1i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg5_nxv4i16_nxv1i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv1i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv4i16_nxv1i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv1i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv4i16.nxv2i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv2i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg5_nxv4i16_nxv2i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv2i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv4i16_nxv2i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv2i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv4i16.nxv8i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv8i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg5_nxv4i16_nxv8i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv8i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv4i16_nxv8i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv8i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv4i16.nxv32i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv32i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg5_nxv4i16_nxv32i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv32i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv4i16_nxv32i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv32i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv4i16.nxv16i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv16i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg5_nxv4i16_nxv16i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv16i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv4i16_nxv16i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv16i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv4i16.nxv2i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv2i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg5_nxv4i16_nxv2i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv2i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv4i16_nxv2i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv2i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv4i16.nxv2i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv2i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg5_nxv4i16_nxv2i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv2i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv4i16_nxv2i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv2i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv4i16.nxv16i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv16i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg6_nxv4i16_nxv16i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv4i16.nxv16i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv4i16_nxv16i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv16i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv4i16.nxv32i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv32i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg6_nxv4i16_nxv32i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv4i16.nxv32i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv4i16_nxv32i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv32i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i32>, i64)
 declare void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i32>, <vscale x 4 x i1>, i64)
 
@@ -42979,154 +7721,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg6.nxv4i16.nxv16i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv16i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg6_nxv4i16_nxv16i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv4i16.nxv16i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv4i16_nxv16i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv16i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv4i16.nxv1i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv1i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg6_nxv4i16_nxv1i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv4i16.nxv1i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv4i16_nxv1i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv1i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv4i16.nxv1i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv1i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg6_nxv4i16_nxv1i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv4i16.nxv1i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv4i16_nxv1i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv1i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv4i16.nxv8i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv8i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg6_nxv4i16_nxv8i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv4i16.nxv8i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv4i16_nxv8i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv8i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i8>, i64)
 declare void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i8>, <vscale x 4 x i1>, i64)
 
@@ -43164,117 +7758,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg6.nxv4i16.nxv1i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv1i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg6_nxv4i16_nxv1i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv4i16.nxv1i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv4i16_nxv1i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv1i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv4i16.nxv2i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv2i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg6_nxv4i16_nxv2i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv4i16.nxv2i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv4i16_nxv2i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv2i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv4i16.nxv8i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv8i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg6_nxv4i16_nxv8i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv4i16.nxv8i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv4i16_nxv8i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv8i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i64>, i64)
 declare void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i64>, <vscale x 4 x i1>, i64)
 
@@ -43312,43 +7795,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg6.nxv4i16.nxv64i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv64i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 64 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg6_nxv4i16_nxv64i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv4i16.nxv64i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv4i16_nxv64i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv64i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i16>, i64)
 declare void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i16>, <vscale x 4 x i1>, i64)
 
@@ -43386,380 +7832,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg6.nxv4i16.nxv8i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv8i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg6_nxv4i16_nxv8i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv4i16.nxv8i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv4i16_nxv8i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv8i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv4i16.nxv1i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv1i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg6_nxv4i16_nxv1i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv4i16.nxv1i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv4i16_nxv1i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv1i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv4i16.nxv2i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv2i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg6_nxv4i16_nxv2i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv4i16.nxv2i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv4i16_nxv2i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv2i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv4i16.nxv8i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv8i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg6_nxv4i16_nxv8i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv4i16.nxv8i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv4i16_nxv8i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv8i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv4i16.nxv32i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv32i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg6_nxv4i16_nxv32i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv4i16.nxv32i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv4i16_nxv32i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv32i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv4i16.nxv16i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv16i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg6_nxv4i16_nxv16i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv4i16.nxv16i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv4i16_nxv16i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv16i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv4i16.nxv2i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv2i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg6_nxv4i16_nxv2i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv4i16.nxv2i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv4i16_nxv2i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv2i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv4i16.nxv2i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv2i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg6_nxv4i16_nxv2i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv4i16.nxv2i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv4i16_nxv2i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv2i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv4i16.nxv16i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv16i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg7_nxv4i16_nxv16i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv4i16.nxv16i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv4i16_nxv16i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv16i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv4i16.nxv32i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv32i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg7_nxv4i16_nxv32i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv4i16.nxv32i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv4i16_nxv32i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv32i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i32>, i64)
 declare void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i32>, <vscale x 4 x i1>, i64)
 
@@ -43799,162 +7871,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg7.nxv4i16.nxv16i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv16i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg7_nxv4i16_nxv16i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv4i16.nxv16i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv4i16_nxv16i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv16i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv4i16.nxv1i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv1i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg7_nxv4i16_nxv1i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv4i16.nxv1i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv4i16_nxv1i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv1i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv4i16.nxv1i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv1i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg7_nxv4i16_nxv1i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv4i16.nxv1i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv4i16_nxv1i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv1i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv4i16.nxv8i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv8i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg7_nxv4i16_nxv8i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv4i16.nxv8i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv4i16_nxv8i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv8i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i8>, i64)
 declare void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i8>, <vscale x 4 x i1>, i64)
 
@@ -43994,123 +7910,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg7.nxv4i16.nxv1i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv1i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg7_nxv4i16_nxv1i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv4i16.nxv1i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv4i16_nxv1i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv1i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv4i16.nxv2i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv2i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg7_nxv4i16_nxv2i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv4i16.nxv2i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv4i16_nxv2i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv2i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv4i16.nxv8i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv8i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg7_nxv4i16_nxv8i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv4i16.nxv8i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv4i16_nxv8i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv8i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i64>, i64)
 declare void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i64>, <vscale x 4 x i1>, i64)
 
@@ -44150,45 +7949,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg7.nxv4i16.nxv64i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv64i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 64 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg7_nxv4i16_nxv64i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv4i16.nxv64i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv4i16_nxv64i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv64i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i16>, i64)
 declare void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i16>, <vscale x 4 x i1>, i64)
 
@@ -44228,400 +7988,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg7.nxv4i16.nxv8i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv8i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg7_nxv4i16_nxv8i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv4i16.nxv8i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv4i16_nxv8i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv8i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv4i16.nxv1i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv1i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg7_nxv4i16_nxv1i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv4i16.nxv1i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv4i16_nxv1i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv1i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv4i16.nxv2i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv2i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg7_nxv4i16_nxv2i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv4i16.nxv2i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv4i16_nxv2i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv2i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv4i16.nxv8i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv8i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg7_nxv4i16_nxv8i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv4i16.nxv8i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv4i16_nxv8i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv8i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv4i16.nxv32i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv32i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg7_nxv4i16_nxv32i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv4i16.nxv32i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv4i16_nxv32i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv32i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv4i16.nxv16i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv16i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg7_nxv4i16_nxv16i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv4i16.nxv16i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv4i16_nxv16i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv16i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv4i16.nxv2i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv2i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg7_nxv4i16_nxv2i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv4i16.nxv2i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv4i16_nxv2i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv2i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv4i16.nxv2i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv2i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg7_nxv4i16_nxv2i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv4i16.nxv2i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv4i16_nxv2i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv2i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv4i16.nxv16i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv16i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg8_nxv4i16_nxv16i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv4i16.nxv16i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv4i16_nxv16i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv16i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv4i16.nxv32i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv32i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg8_nxv4i16_nxv32i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv4i16.nxv32i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv4i16_nxv32i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv32i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i32>, i64)
 declare void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i32>, <vscale x 4 x i1>, i64)
 
@@ -44663,170 +8029,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg8.nxv4i16.nxv16i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv16i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg8_nxv4i16_nxv16i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv4i16.nxv16i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv4i16_nxv16i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv16i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv4i16.nxv1i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv1i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg8_nxv4i16_nxv1i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv4i16.nxv1i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv4i16_nxv1i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv1i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv4i16.nxv1i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv1i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg8_nxv4i16_nxv1i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv4i16.nxv1i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv4i16_nxv1i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv1i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv4i16.nxv8i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv8i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg8_nxv4i16_nxv8i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv4i16.nxv8i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv4i16_nxv8i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv8i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i8>, i64)
 declare void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i8>, <vscale x 4 x i1>, i64)
 
@@ -44868,129 +8070,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg8.nxv4i16.nxv1i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv1i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg8_nxv4i16_nxv1i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv4i16.nxv1i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv4i16_nxv1i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv1i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv4i16.nxv2i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv2i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg8_nxv4i16_nxv2i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv4i16.nxv2i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv4i16_nxv2i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv2i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv4i16.nxv8i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv8i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg8_nxv4i16_nxv8i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv4i16.nxv8i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv4i16_nxv8i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv8i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i64>, i64)
 declare void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i64>, <vscale x 4 x i1>, i64)
 
@@ -45032,47 +8111,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg8.nxv4i16.nxv64i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv64i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 64 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg8_nxv4i16_nxv64i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv4i16.nxv64i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv4i16_nxv64i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv64i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i16>, i64)
 declare void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i16>, <vscale x 4 x i1>, i64)
 
@@ -45114,450 +8152,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg8.nxv4i16.nxv8i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv8i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg8_nxv4i16_nxv8i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv4i16.nxv8i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv4i16_nxv8i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv8i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv4i16.nxv1i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv1i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg8_nxv4i16_nxv1i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv4i16.nxv1i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv4i16_nxv1i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv1i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv4i16.nxv2i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv2i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg8_nxv4i16_nxv2i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv4i16.nxv2i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv4i16_nxv2i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv2i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv4i16.nxv8i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv8i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg8_nxv4i16_nxv8i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv4i16.nxv8i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv4i16_nxv8i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv8i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv4i16.nxv32i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv32i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg8_nxv4i16_nxv32i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv4i16.nxv32i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv4i16_nxv32i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv32i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv4i16.nxv16i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv16i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg8_nxv4i16_nxv16i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv4i16.nxv16i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv4i16_nxv16i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv16i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv4i16.nxv2i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv2i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg8_nxv4i16_nxv2i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv4i16.nxv2i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv4i16_nxv2i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv2i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv4i16.nxv2i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv2i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg8_nxv4i16_nxv2i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv4i16.nxv2i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv4i16_nxv2i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv2i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i8.nxv16i16(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv16i16(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i8_nxv16i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i8.nxv16i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i8_nxv16i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv16i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i8.nxv32i16(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv32i16(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i8_nxv32i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i8.nxv32i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i8_nxv32i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv32i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i8.nxv4i32(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv4i32(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i8_nxv4i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i8.nxv4i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i8_nxv4i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv4i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i8.nxv16i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv16i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i8_nxv16i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i8.nxv16i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i8_nxv16i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv16i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i64(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i64>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i64(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
 
@@ -45620,66 +8214,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv1i8.nxv8i16(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv8i16(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i8_nxv8i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i8.nxv8i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i8_nxv8i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv8i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i8.nxv4i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv4i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i8_nxv4i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i8.nxv4i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i8_nxv4i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv4i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i16>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
 
@@ -45711,186 +8245,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv1i8.nxv2i32(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv2i32(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i8_nxv2i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i8.nxv2i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i8_nxv2i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv2i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i8.nxv8i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv8i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i8_nxv8i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i8.nxv8i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i8_nxv8i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv8i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i8.nxv4i64(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv4i64(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i8_nxv4i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i8.nxv4i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i8_nxv4i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv4i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i8.nxv64i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv64i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 64 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i8_nxv64i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i8.nxv64i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i8_nxv64i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv64i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i8.nxv4i16(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv4i16(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i8_nxv4i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i8.nxv4i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i8_nxv4i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv4i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i8.nxv8i64(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv8i64(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i8_nxv8i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i8.nxv8i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i8_nxv8i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv8i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i8>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)
 
@@ -45922,308 +8276,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv1i8.nxv2i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv2i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i8_nxv2i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i8.nxv2i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i8_nxv2i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv2i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i8.nxv8i32(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv8i32(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i8_nxv8i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i8.nxv8i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i8_nxv8i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv8i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i8.nxv32i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv32i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i8_nxv32i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i8.nxv32i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i8_nxv32i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv32i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i8.nxv16i32(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv16i32(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i8_nxv16i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i8.nxv16i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i8_nxv16i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv16i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i8.nxv2i16(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv2i16(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i8_nxv2i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i8.nxv2i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i8_nxv2i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv2i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1i8.nxv2i64(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv2i64(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1i8_nxv2i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1i8.nxv2i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1i8_nxv2i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv2i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i8.nxv16i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv16i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i8_nxv16i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i8.nxv16i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i8_nxv16i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv16i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i8.nxv32i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv32i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i8_nxv32i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i8.nxv32i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i8_nxv32i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv32i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i8.nxv4i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv4i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i8_nxv4i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i8.nxv4i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i8_nxv4i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv4i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i8.nxv16i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv16i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i8_nxv16i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i8.nxv16i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i8_nxv16i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv16i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i64>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
 
@@ -46286,68 +8338,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv1i8.nxv8i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv8i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i8_nxv8i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i8.nxv8i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i8_nxv8i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv8i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i8.nxv4i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv4i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i8_nxv4i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i8.nxv4i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i8_nxv4i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv4i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i16>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
 
@@ -46379,192 +8369,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv1i8.nxv2i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv2i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i8_nxv2i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i8.nxv2i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i8_nxv2i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv2i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i8.nxv8i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv8i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i8_nxv8i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i8.nxv8i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i8_nxv8i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv8i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i8.nxv4i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv4i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i8_nxv4i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i8.nxv4i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i8_nxv4i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv4i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i8.nxv64i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv64i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 64 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i8_nxv64i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i8.nxv64i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i8_nxv64i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv64i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i8.nxv4i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv4i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i8_nxv4i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i8.nxv4i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i8_nxv4i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv4i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i8.nxv8i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv8i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i8_nxv8i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i8.nxv8i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i8_nxv8i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv8i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i8>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)
 
@@ -46596,324 +8400,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv1i8.nxv2i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv2i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i8_nxv2i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i8.nxv2i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i8_nxv2i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv2i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i8.nxv8i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv8i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i8_nxv8i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i8.nxv8i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i8_nxv8i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv8i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i8.nxv32i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv32i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i8_nxv32i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i8.nxv32i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i8_nxv32i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv32i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i8.nxv16i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv16i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i8_nxv16i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i8.nxv16i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i8_nxv16i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv16i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i8.nxv2i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv2i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i8_nxv2i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i8.nxv2i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i8_nxv2i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv2i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1i8.nxv2i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv2i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1i8_nxv2i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1i8.nxv2i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1i8_nxv2i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv2i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i8.nxv16i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv16i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i8_nxv16i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i8.nxv16i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i8_nxv16i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv16i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i8.nxv32i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv32i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i8_nxv32i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i8.nxv32i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i8_nxv32i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv32i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i8.nxv4i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv4i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i8_nxv4i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i8.nxv4i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i8_nxv4i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv4i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i8.nxv16i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv16i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i8_nxv16i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i8.nxv16i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i8_nxv16i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv16i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i64>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
 
@@ -46980,72 +8466,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv1i8.nxv8i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv8i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i8_nxv8i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i8.nxv8i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i8_nxv8i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv8i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i8.nxv4i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv4i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i8_nxv4i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i8.nxv4i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i8_nxv4i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv4i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i16>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
 
@@ -47079,204 +8499,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv1i8.nxv2i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv2i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i8_nxv2i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i8.nxv2i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i8_nxv2i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv2i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i8.nxv8i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv8i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i8_nxv8i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i8.nxv8i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i8_nxv8i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv8i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i8.nxv4i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv4i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i8_nxv4i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i8.nxv4i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i8_nxv4i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv4i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i8.nxv64i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv64i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 64 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i8_nxv64i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i8.nxv64i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i8_nxv64i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv64i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i8.nxv4i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv4i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i8_nxv4i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i8.nxv4i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i8_nxv4i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv4i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i8.nxv8i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv8i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i8_nxv8i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i8.nxv8i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i8_nxv8i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv8i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i8>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)
 
@@ -47310,344 +8532,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv1i8.nxv2i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv2i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i8_nxv2i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i8.nxv2i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i8_nxv2i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv2i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i8.nxv8i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv8i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i8_nxv8i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i8.nxv8i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i8_nxv8i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv8i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i8.nxv32i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv32i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i8_nxv32i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i8.nxv32i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i8_nxv32i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv32i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i8.nxv16i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv16i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i8_nxv16i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i8.nxv16i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i8_nxv16i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv16i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i8.nxv2i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv2i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i8_nxv2i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i8.nxv2i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i8_nxv2i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv2i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1i8.nxv2i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv2i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1i8_nxv2i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1i8.nxv2i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1i8_nxv2i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv2i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i8.nxv16i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv16i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i8_nxv16i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i8.nxv16i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i8_nxv16i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv16i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i8.nxv32i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv32i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i8_nxv32i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i8.nxv32i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i8_nxv32i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv32i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i8.nxv4i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv4i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i8_nxv4i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i8.nxv4i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i8_nxv4i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv4i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i8.nxv16i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv16i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i8_nxv16i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i8.nxv16i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i8_nxv16i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv16i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i64>, i64)
 declare void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
 
@@ -47718,76 +8602,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg5.nxv1i8.nxv8i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv8i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i8_nxv8i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i8.nxv8i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i8_nxv8i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv8i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i8.nxv4i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv4i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i8_nxv4i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i8.nxv4i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i8_nxv4i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv4i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i16>, i64)
 declare void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
 
@@ -47823,216 +8637,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg5.nxv1i8.nxv2i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv2i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i8_nxv2i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i8.nxv2i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i8_nxv2i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv2i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i8.nxv8i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv8i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i8_nxv8i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i8.nxv8i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i8_nxv8i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv8i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i8.nxv4i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv4i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i8_nxv4i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i8.nxv4i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i8_nxv4i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv4i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i8.nxv64i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv64i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 64 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i8_nxv64i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i8.nxv64i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i8_nxv64i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv64i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i8.nxv4i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv4i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i8_nxv4i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i8.nxv4i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i8_nxv4i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv4i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i8.nxv8i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv8i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i8_nxv8i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i8.nxv8i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i8_nxv8i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv8i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i8>, i64)
 declare void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)
 
@@ -48068,364 +8672,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg5.nxv1i8.nxv2i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv2i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i8_nxv2i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i8.nxv2i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i8_nxv2i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv2i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i8.nxv8i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv8i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i8_nxv8i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i8.nxv8i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i8_nxv8i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv8i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i8.nxv32i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv32i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i8_nxv32i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i8.nxv32i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i8_nxv32i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv32i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i8.nxv16i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv16i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i8_nxv16i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i8.nxv16i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i8_nxv16i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv16i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i8.nxv2i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv2i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i8_nxv2i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i8.nxv2i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i8_nxv2i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv2i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1i8.nxv2i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv2i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1i8_nxv2i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1i8.nxv2i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1i8_nxv2i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv2i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i8.nxv16i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv16i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i8_nxv16i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i8.nxv16i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i8_nxv16i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv16i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i8.nxv32i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv32i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i8_nxv32i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i8.nxv32i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i8_nxv32i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv32i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i8.nxv4i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv4i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i8_nxv4i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i8.nxv4i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i8_nxv4i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv4i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i8.nxv16i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv16i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i8_nxv16i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i8.nxv16i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i8_nxv16i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv16i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i64>, i64)
 declare void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
 
@@ -48500,80 +8746,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg6.nxv1i8.nxv8i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv8i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i8_nxv8i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i8.nxv8i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i8_nxv8i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv8i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i8.nxv4i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv4i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i8_nxv4i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i8.nxv4i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i8_nxv4i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv4i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i16>, i64)
 declare void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
 
@@ -48611,228 +8783,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg6.nxv1i8.nxv2i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv2i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i8_nxv2i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i8.nxv2i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i8_nxv2i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv2i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i8.nxv8i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv8i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i8_nxv8i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i8.nxv8i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i8_nxv8i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv8i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i8.nxv4i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv4i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i8_nxv4i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i8.nxv4i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i8_nxv4i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv4i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i8.nxv64i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv64i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 64 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i8_nxv64i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i8.nxv64i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i8_nxv64i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv64i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i8.nxv4i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv4i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i8_nxv4i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i8.nxv4i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i8_nxv4i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv4i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i8.nxv8i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv8i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i8_nxv8i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i8.nxv8i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i8_nxv8i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv8i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i8>, i64)
 declare void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)
 
@@ -48870,384 +8820,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg6.nxv1i8.nxv2i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv2i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i8_nxv2i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i8.nxv2i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i8_nxv2i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv2i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i8.nxv8i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv8i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i8_nxv8i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i8.nxv8i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i8_nxv8i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv8i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i8.nxv32i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv32i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i8_nxv32i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i8.nxv32i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i8_nxv32i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv32i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i8.nxv16i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv16i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i8_nxv16i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i8.nxv16i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i8_nxv16i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv16i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i8.nxv2i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv2i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i8_nxv2i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i8.nxv2i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i8_nxv2i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv2i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1i8.nxv2i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv2i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1i8_nxv2i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1i8.nxv2i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1i8_nxv2i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv2i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i8.nxv16i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv16i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i8_nxv16i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i8.nxv16i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i8_nxv16i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv16i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i8.nxv32i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv32i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i8_nxv32i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i8.nxv32i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i8_nxv32i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv32i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i8.nxv4i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv4i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i8_nxv4i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i8.nxv4i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i8_nxv4i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv4i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i8.nxv16i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv16i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i8_nxv16i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i8.nxv16i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i8_nxv16i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv16i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i64>, i64)
 declare void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
 
@@ -49326,84 +8898,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg7.nxv1i8.nxv8i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv8i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i8_nxv8i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i8.nxv8i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i8_nxv8i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv8i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i8.nxv4i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv4i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i8_nxv4i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i8.nxv4i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i8_nxv4i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv4i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i16>, i64)
 declare void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
 
@@ -49443,240 +8937,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg7.nxv1i8.nxv2i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv2i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i8_nxv2i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i8.nxv2i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i8_nxv2i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv2i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i8.nxv8i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv8i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i8_nxv8i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i8.nxv8i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i8_nxv8i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv8i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i8.nxv4i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv4i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i8_nxv4i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i8.nxv4i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i8_nxv4i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv4i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i8.nxv64i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv64i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 64 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i8_nxv64i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i8.nxv64i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i8_nxv64i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv64i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i8.nxv4i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv4i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i8_nxv4i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i8.nxv4i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i8_nxv4i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv4i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i8.nxv8i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv8i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i8_nxv8i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i8.nxv8i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i8_nxv8i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv8i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i8>, i64)
 declare void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)
 
@@ -49716,404 +8976,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg7.nxv1i8.nxv2i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv2i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i8_nxv2i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i8.nxv2i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i8_nxv2i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv2i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i8.nxv8i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv8i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i8_nxv8i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i8.nxv8i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i8_nxv8i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv8i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i8.nxv32i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv32i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i8_nxv32i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i8.nxv32i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i8_nxv32i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv32i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i8.nxv16i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv16i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i8_nxv16i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i8.nxv16i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i8_nxv16i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv16i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i8.nxv2i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv2i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i8_nxv2i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i8.nxv2i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i8_nxv2i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv2i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1i8.nxv2i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv2i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1i8_nxv2i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1i8.nxv2i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1i8_nxv2i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv2i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i8.nxv16i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv16i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i8_nxv16i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i8.nxv16i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i8_nxv16i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv16i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i8.nxv32i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv32i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i8_nxv32i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i8.nxv32i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i8_nxv32i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv32i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i8.nxv4i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv4i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i8_nxv4i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i8.nxv4i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i8_nxv4i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv4i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i8.nxv16i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv16i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i8_nxv16i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i8.nxv16i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i8_nxv16i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv16i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i64>, i64)
 declare void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
 
@@ -50196,88 +9058,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg8.nxv1i8.nxv8i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv8i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i8_nxv8i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i8.nxv8i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i8_nxv8i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv8i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i8.nxv4i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv4i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i8_nxv4i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i8.nxv4i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i8_nxv4i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv4i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i16>, i64)
 declare void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
 
@@ -50319,252 +9099,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg8.nxv1i8.nxv2i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv2i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i8_nxv2i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i8.nxv2i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i8_nxv2i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv2i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i8.nxv8i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv8i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i8_nxv8i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i8.nxv8i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i8_nxv8i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv8i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i8.nxv4i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv4i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i8_nxv4i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i8.nxv4i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i8_nxv4i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv4i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i8.nxv64i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv64i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 64 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i8_nxv64i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i8.nxv64i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i8_nxv64i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv64i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i8.nxv4i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv4i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i8_nxv4i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i8.nxv4i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i8_nxv4i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv4i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i8.nxv8i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv8i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i8_nxv8i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i8.nxv8i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i8_nxv8i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv8i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i8>, i64)
 declare void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)
 
@@ -50606,521 +9140,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg8.nxv1i8.nxv2i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv2i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i8_nxv2i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i8.nxv2i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i8_nxv2i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv2i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i8.nxv8i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv8i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i8_nxv8i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i8.nxv8i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i8_nxv8i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv8i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i8.nxv32i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv32i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i8_nxv32i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i8.nxv32i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i8_nxv32i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv32i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i8.nxv16i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv16i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i8_nxv16i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i8.nxv16i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i8_nxv16i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv16i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i8.nxv2i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv2i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i8_nxv2i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i8.nxv2i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i8_nxv2i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv2i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1i8.nxv2i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv2i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1i8_nxv2i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1i8.nxv2i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1i8_nxv2i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv2i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i8.nxv16i16(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv16i16(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i8_nxv16i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i8.nxv16i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i8_nxv16i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv16i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i8.nxv32i16(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv32i16(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 32 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i8_nxv32i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i8.nxv32i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i8_nxv32i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv32i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i8.nxv4i32(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv4i32(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i8_nxv4i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i8.nxv4i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i8_nxv4i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv4i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i8.nxv16i8(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv16i8(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i8_nxv16i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i8.nxv16i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i8_nxv16i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv16i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i8.nxv1i64(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv1i64(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i8_nxv1i64(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i8.nxv1i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i8_nxv1i64(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv1i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i8.nxv1i32(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv1i32(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i8_nxv1i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i8.nxv1i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i8_nxv1i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv1i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i8.nxv8i16(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv8i16(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i8_nxv8i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i8.nxv8i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i8_nxv8i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv8i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i8.nxv4i8(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv4i8(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i8_nxv4i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i8.nxv4i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i8_nxv4i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv4i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i8.nxv1i16(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv1i16(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i8_nxv1i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i8.nxv1i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i8_nxv1i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv1i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i32>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
 
@@ -51152,186 +9171,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv2i8.nxv8i8(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv8i8(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i8_nxv8i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i8.nxv8i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i8_nxv8i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv8i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i8.nxv4i64(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv4i64(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i8_nxv4i64(<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i8.nxv4i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i8_nxv4i64(<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv4i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i8.nxv64i8(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv64i8(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 64 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i8_nxv64i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i8.nxv64i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i8_nxv64i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv64i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i8.nxv4i16(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv4i16(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i8_nxv4i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i8.nxv4i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i8_nxv4i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv4i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i8.nxv8i64(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv8i64(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i8_nxv8i64(<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i8.nxv8i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i8_nxv8i64(<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv8i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i8.nxv1i8(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv1i8(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i8_nxv1i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i8.nxv1i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i8_nxv1i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv1i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i8>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i8>, <vscale x 2 x i1>, i64)
 
@@ -51363,93 +9202,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv2i8.nxv8i32(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv8i32(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i8_nxv8i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i8.nxv8i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i8_nxv8i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv8i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i8.nxv32i8(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv32i8(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 32 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i8_nxv32i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i8.nxv32i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i8_nxv32i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv32i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i8.nxv16i32(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv16i32(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i8_nxv16i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i8.nxv16i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i8_nxv16i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv16i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i16(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i16>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i16(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64)
 
@@ -51510,285 +9262,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv2i8.nxv16i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv16i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i8_nxv16i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv16i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i8_nxv16i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv16i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i8.nxv32i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv32i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 32 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i8_nxv32i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv32i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i8_nxv32i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv32i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i8.nxv4i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv4i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i8_nxv4i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv4i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i8_nxv4i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv4i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i8.nxv16i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv16i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i8_nxv16i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv16i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i8_nxv16i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv16i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i8.nxv1i64(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv1i64(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i8_nxv1i64(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv1i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i8_nxv1i64(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv1i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i8.nxv1i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv1i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i8_nxv1i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv1i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i8_nxv1i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv1i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i8.nxv8i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv8i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i8_nxv8i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv8i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i8_nxv8i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv8i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i8.nxv4i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv4i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i8_nxv4i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv4i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i8_nxv4i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv4i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i8.nxv1i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv1i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i8_nxv1i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv1i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i8_nxv1i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv1i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i32>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
 
@@ -51820,192 +9293,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv2i8.nxv8i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv8i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i8_nxv8i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv8i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i8_nxv8i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv8i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i8.nxv4i64(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv4i64(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i8_nxv4i64(<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv4i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i8_nxv4i64(<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv4i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i8.nxv64i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv64i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 64 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i8_nxv64i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv64i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i8_nxv64i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv64i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i8.nxv4i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv4i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i8_nxv4i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv4i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i8_nxv4i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv4i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i8.nxv8i64(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv8i64(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i8_nxv8i64(<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv8i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i8_nxv8i64(<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv8i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i8.nxv1i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv1i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i8_nxv1i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv1i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i8_nxv1i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv1i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i8>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i8>, <vscale x 2 x i1>, i64)
 
@@ -52037,99 +9324,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv2i8.nxv8i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv8i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i8_nxv8i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv8i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i8_nxv8i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv8i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i8.nxv32i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv32i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 32 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i8_nxv32i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv32i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i8_nxv32i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv32i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i8.nxv16i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv16i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i8_nxv16i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv16i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i8_nxv16i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv16i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i16>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64)
 
@@ -52192,303 +9386,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv2i8.nxv16i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv16i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i8_nxv16i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i8.nxv16i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i8_nxv16i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv16i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i8.nxv32i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv32i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 32 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i8_nxv32i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i8.nxv32i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i8_nxv32i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv32i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i8.nxv4i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv4i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i8_nxv4i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i8.nxv4i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i8_nxv4i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv4i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i8.nxv16i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv16i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i8_nxv16i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i8.nxv16i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i8_nxv16i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv16i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i8.nxv1i64(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv1i64(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i8_nxv1i64(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i8.nxv1i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i8_nxv1i64(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv1i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i8.nxv1i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv1i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i8_nxv1i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i8.nxv1i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i8_nxv1i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv1i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i8.nxv8i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv8i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i8_nxv8i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i8.nxv8i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i8_nxv8i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv8i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i8.nxv4i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv4i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i8_nxv4i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i8.nxv4i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i8_nxv4i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv4i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i8.nxv1i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv1i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i8_nxv1i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i8.nxv1i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i8_nxv1i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv1i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i32>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
 
@@ -52522,204 +9419,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv2i8.nxv8i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv8i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i8_nxv8i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i8.nxv8i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i8_nxv8i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv8i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i8.nxv4i64(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv4i64(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i8_nxv4i64(<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i8.nxv4i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i8_nxv4i64(<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv4i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i8.nxv64i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv64i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 64 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i8_nxv64i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i8.nxv64i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i8_nxv64i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv64i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i8.nxv4i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv4i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i8_nxv4i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i8.nxv4i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i8_nxv4i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv4i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i8.nxv8i64(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv8i64(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i8_nxv8i64(<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i8.nxv8i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i8_nxv8i64(<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv8i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i8.nxv1i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv1i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i8_nxv1i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i8.nxv1i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i8_nxv1i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv1i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i8>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i8>, <vscale x 2 x i1>, i64)
 
@@ -52753,105 +9452,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv2i8.nxv8i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv8i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i8_nxv8i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i8.nxv8i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i8_nxv8i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv8i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i8.nxv32i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv32i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 32 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i8_nxv32i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i8.nxv32i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i8_nxv32i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv32i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i8.nxv16i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv16i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i8_nxv16i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i8.nxv16i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i8_nxv16i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv16i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i16>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64)
 
@@ -52918,321 +9518,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg5.nxv2i8.nxv16i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv16i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2i8_nxv16i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2i8.nxv16i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2i8_nxv16i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv16i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2i8.nxv32i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv32i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 32 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2i8_nxv32i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2i8.nxv32i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2i8_nxv32i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv32i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2i8.nxv4i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv4i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2i8_nxv4i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2i8.nxv4i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2i8_nxv4i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv4i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2i8.nxv16i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv16i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2i8_nxv16i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2i8.nxv16i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2i8_nxv16i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv16i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2i8.nxv1i64(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv1i64(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2i8_nxv1i64(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2i8.nxv1i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2i8_nxv1i64(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv1i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2i8.nxv1i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv1i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2i8_nxv1i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2i8.nxv1i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2i8_nxv1i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv1i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2i8.nxv8i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv8i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2i8_nxv8i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2i8.nxv8i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2i8_nxv8i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv8i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2i8.nxv4i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv4i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2i8_nxv4i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2i8.nxv4i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2i8_nxv4i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv4i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2i8.nxv1i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv1i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2i8_nxv1i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2i8.nxv1i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2i8_nxv1i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv1i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i32>, i64)
 declare void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
 
@@ -53268,216 +9553,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg5.nxv2i8.nxv8i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv8i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2i8_nxv8i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2i8.nxv8i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2i8_nxv8i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv8i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2i8.nxv4i64(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv4i64(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2i8_nxv4i64(<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2i8.nxv4i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2i8_nxv4i64(<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv4i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2i8.nxv64i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv64i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 64 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2i8_nxv64i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2i8.nxv64i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2i8_nxv64i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv64i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2i8.nxv4i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv4i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2i8_nxv4i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2i8.nxv4i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2i8_nxv4i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv4i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2i8.nxv8i64(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv8i64(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2i8_nxv8i64(<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2i8.nxv8i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2i8_nxv8i64(<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv8i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2i8.nxv1i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv1i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2i8_nxv1i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2i8.nxv1i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2i8_nxv1i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv1i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i8>, i64)
 declare void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i8>, <vscale x 2 x i1>, i64)
 
@@ -53513,111 +9588,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg5.nxv2i8.nxv8i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv8i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2i8_nxv8i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2i8.nxv8i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2i8_nxv8i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv8i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2i8.nxv32i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv32i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 32 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2i8_nxv32i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2i8.nxv32i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2i8_nxv32i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv32i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2i8.nxv16i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv16i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2i8_nxv16i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2i8.nxv16i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2i8_nxv16i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv16i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i16>, i64)
 declare void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64)
 
@@ -53688,339 +9658,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg6.nxv2i8.nxv16i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv16i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2i8_nxv16i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2i8.nxv16i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2i8_nxv16i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv16i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2i8.nxv32i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv32i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 32 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2i8_nxv32i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2i8.nxv32i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2i8_nxv32i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv32i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2i8.nxv4i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv4i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2i8_nxv4i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2i8.nxv4i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2i8_nxv4i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv4i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2i8.nxv16i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv16i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2i8_nxv16i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2i8.nxv16i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2i8_nxv16i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv16i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2i8.nxv1i64(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv1i64(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2i8_nxv1i64(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2i8.nxv1i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2i8_nxv1i64(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv1i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2i8.nxv1i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv1i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2i8_nxv1i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2i8.nxv1i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2i8_nxv1i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv1i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2i8.nxv8i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv8i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2i8_nxv8i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2i8.nxv8i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2i8_nxv8i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv8i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2i8.nxv4i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv4i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2i8_nxv4i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2i8.nxv4i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2i8_nxv4i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv4i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2i8.nxv1i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv1i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2i8_nxv1i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2i8.nxv1i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2i8_nxv1i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv1i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i32>, i64)
 declare void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
 
@@ -54058,228 +9695,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg6.nxv2i8.nxv8i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv8i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2i8_nxv8i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2i8.nxv8i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2i8_nxv8i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv8i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2i8.nxv4i64(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv4i64(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2i8_nxv4i64(<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2i8.nxv4i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2i8_nxv4i64(<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv4i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2i8.nxv64i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv64i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 64 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2i8_nxv64i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2i8.nxv64i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2i8_nxv64i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv64i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2i8.nxv4i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv4i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2i8_nxv4i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2i8.nxv4i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2i8_nxv4i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv4i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2i8.nxv8i64(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv8i64(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2i8_nxv8i64(<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2i8.nxv8i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2i8_nxv8i64(<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv8i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2i8.nxv1i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv1i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2i8_nxv1i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2i8.nxv1i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2i8_nxv1i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv1i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i8>, i64)
 declare void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i8>, <vscale x 2 x i1>, i64)
 
@@ -54317,117 +9732,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg6.nxv2i8.nxv8i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv8i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2i8_nxv8i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2i8.nxv8i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2i8_nxv8i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv8i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2i8.nxv32i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv32i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 32 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2i8_nxv32i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2i8.nxv32i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2i8_nxv32i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv32i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2i8.nxv16i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv16i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2i8_nxv16i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2i8.nxv16i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2i8_nxv16i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv16i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i16>, i64)
 declare void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64)
 
@@ -54502,357 +9806,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg7.nxv2i8.nxv16i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv16i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2i8_nxv16i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2i8.nxv16i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2i8_nxv16i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv16i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2i8.nxv32i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv32i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 32 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2i8_nxv32i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2i8.nxv32i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2i8_nxv32i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv32i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2i8.nxv4i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv4i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2i8_nxv4i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2i8.nxv4i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2i8_nxv4i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv4i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2i8.nxv16i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv16i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2i8_nxv16i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2i8.nxv16i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2i8_nxv16i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv16i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2i8.nxv1i64(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv1i64(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2i8_nxv1i64(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2i8.nxv1i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2i8_nxv1i64(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv1i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2i8.nxv1i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv1i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2i8_nxv1i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2i8.nxv1i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2i8_nxv1i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv1i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2i8.nxv8i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv8i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2i8_nxv8i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2i8.nxv8i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2i8_nxv8i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv8i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2i8.nxv4i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv4i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2i8_nxv4i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2i8.nxv4i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2i8_nxv4i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv4i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2i8.nxv1i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv1i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2i8_nxv1i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2i8.nxv1i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2i8_nxv1i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv1i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i32>, i64)
 declare void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
 
@@ -54892,240 +9845,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg7.nxv2i8.nxv8i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv8i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2i8_nxv8i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2i8.nxv8i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2i8_nxv8i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv8i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2i8.nxv4i64(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv4i64(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2i8_nxv4i64(<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2i8.nxv4i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2i8_nxv4i64(<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv4i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2i8.nxv64i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv64i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 64 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2i8_nxv64i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2i8.nxv64i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2i8_nxv64i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv64i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2i8.nxv4i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv4i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2i8_nxv4i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2i8.nxv4i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2i8_nxv4i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv4i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2i8.nxv8i64(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv8i64(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2i8_nxv8i64(<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2i8.nxv8i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2i8_nxv8i64(<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv8i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2i8.nxv1i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv1i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2i8_nxv1i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2i8.nxv1i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2i8_nxv1i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv1i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i8>, i64)
 declare void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i8>, <vscale x 2 x i1>, i64)
 
@@ -55165,123 +9884,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg7.nxv2i8.nxv8i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv8i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2i8_nxv8i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2i8.nxv8i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2i8_nxv8i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv8i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2i8.nxv32i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv32i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 32 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2i8_nxv32i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2i8.nxv32i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2i8_nxv32i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv32i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2i8.nxv16i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv16i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2i8_nxv16i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2i8.nxv16i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2i8_nxv16i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv16i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i16>, i64)
 declare void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64)
 
@@ -55360,375 +9962,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg8.nxv2i8.nxv16i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv16i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2i8_nxv16i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2i8.nxv16i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2i8_nxv16i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv16i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2i8.nxv32i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv32i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 32 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2i8_nxv32i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2i8.nxv32i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2i8_nxv32i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv32i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2i8.nxv4i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv4i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2i8_nxv4i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2i8.nxv4i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2i8_nxv4i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv4i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2i8.nxv16i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv16i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2i8_nxv16i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2i8.nxv16i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2i8_nxv16i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv16i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2i8.nxv1i64(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv1i64(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2i8_nxv1i64(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2i8.nxv1i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2i8_nxv1i64(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv1i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2i8.nxv1i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv1i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2i8_nxv1i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2i8.nxv1i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2i8_nxv1i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv1i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2i8.nxv8i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv8i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2i8_nxv8i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2i8.nxv8i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2i8_nxv8i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv8i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2i8.nxv4i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv4i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2i8_nxv4i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2i8.nxv4i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2i8_nxv4i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv4i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2i8.nxv1i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv1i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2i8_nxv1i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2i8.nxv1i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2i8_nxv1i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv1i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i32>, i64)
 declare void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
 
@@ -55770,252 +10003,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg8.nxv2i8.nxv8i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv8i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2i8_nxv8i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2i8.nxv8i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2i8_nxv8i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv8i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2i8.nxv4i64(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv4i64(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2i8_nxv4i64(<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2i8.nxv4i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2i8_nxv4i64(<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv4i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2i8.nxv64i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv64i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 64 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2i8_nxv64i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2i8.nxv64i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2i8_nxv64i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv64i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2i8.nxv4i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv4i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2i8_nxv4i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2i8.nxv4i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2i8_nxv4i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv4i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2i8.nxv8i64(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv8i64(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2i8_nxv8i64(<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2i8.nxv8i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2i8_nxv8i64(<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv8i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2i8.nxv1i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv1i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2i8_nxv1i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2i8.nxv1i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2i8_nxv1i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv1i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i8>, i64)
 declare void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i8>, <vscale x 2 x i1>, i64)
 
@@ -56057,129 +10044,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg8.nxv2i8.nxv8i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv8i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2i8_nxv8i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2i8.nxv8i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2i8_nxv8i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv8i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2i8.nxv32i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv32i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 32 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2i8_nxv32i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2i8.nxv32i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2i8_nxv32i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv32i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2i8.nxv16i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv16i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2i8_nxv16i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2i8.nxv16i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2i8_nxv16i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv16i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i16>, i64)
 declare void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64)
 
@@ -56262,190 +10126,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv8i32.nxv16i16(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv16i16(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 16 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8i32_nxv16i16(<vscale x 8 x i32> %val, i32* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v28, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v28
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8i32.nxv16i16(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8i32_nxv16i16(<vscale x 8 x i32> %val, i32* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v28, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v28, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv16i16(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8i32.nxv32i16(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv32i16(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 32 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8i32_nxv32i16(<vscale x 8 x i32> %val, i32* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8i32.nxv32i16(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8i32_nxv32i16(<vscale x 8 x i32> %val, i32* %base, <vscale x 32 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv32i16(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 32 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8i32.nxv4i32(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv4i32(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 4 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8i32_nxv4i32(<vscale x 8 x i32> %val, i32* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv2r.v v26, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v26
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8i32.nxv4i32(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8i32_nxv4i32(<vscale x 8 x i32> %val, i32* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv2r.v v26, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v26, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv4i32(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8i32.nxv16i8(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv16i8(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 16 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8i32_nxv16i8(<vscale x 8 x i32> %val, i32* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv2r.v v26, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v26
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8i32.nxv16i8(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8i32_nxv16i8(<vscale x 8 x i32> %val, i32* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv2r.v v26, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v26, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv16i8(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8i32.nxv1i64(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv1i64(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 1 x i64>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8i32_nxv1i64(<vscale x 8 x i32> %val, i32* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8i32.nxv1i64(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8i32_nxv1i64(<vscale x 8 x i32> %val, i32* %base, <vscale x 1 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv1i64(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 1 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8i32.nxv1i32(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv1i32(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 1 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8i32_nxv1i32(<vscale x 8 x i32> %val, i32* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8i32.nxv1i32(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8i32_nxv1i32(<vscale x 8 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv1i32(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i16(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 8 x i16>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i16(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 8 x i16>, <vscale x 8 x i1>, i64)
 
@@ -56477,99 +10157,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv8i32.nxv4i8(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv4i8(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 4 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8i32_nxv4i8(<vscale x 8 x i32> %val, i32* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8i32.nxv4i8(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8i32_nxv4i8(<vscale x 8 x i32> %val, i32* %base, <vscale x 4 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv4i8(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 4 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8i32.nxv1i16(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv1i16(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 1 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8i32_nxv1i16(<vscale x 8 x i32> %val, i32* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8i32.nxv1i16(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8i32_nxv1i16(<vscale x 8 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv1i16(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8i32.nxv2i32(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv2i32(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 2 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8i32_nxv2i32(<vscale x 8 x i32> %val, i32* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8i32.nxv2i32(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8i32_nxv2i32(<vscale x 8 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv2i32(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i8(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 8 x i8>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i8(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 8 x i8>, <vscale x 8 x i1>, i64)
 
@@ -56601,97 +10188,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv8i32.nxv4i64(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv4i64(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 4 x i64>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8i32_nxv4i64(<vscale x 8 x i32> %val, i32* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v28, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v28
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8i32.nxv4i64(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8i32_nxv4i64(<vscale x 8 x i32> %val, i32* %base, <vscale x 4 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v28, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v28, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv4i64(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 4 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8i32.nxv64i8(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv64i8(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 64 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8i32_nxv64i8(<vscale x 8 x i32> %val, i32* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8i32.nxv64i8(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8i32_nxv64i8(<vscale x 8 x i32> %val, i32* %base, <vscale x 64 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv64i8(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 64 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8i32.nxv4i16(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv4i16(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 4 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8i32_nxv4i16(<vscale x 8 x i32> %val, i32* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8i32.nxv4i16(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8i32_nxv4i16(<vscale x 8 x i32> %val, i32* %base, <vscale x 4 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv4i16(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 4 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i64(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 8 x i64>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i64(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 8 x i64>, <vscale x 8 x i1>, i64)
 
@@ -56721,68 +10217,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv8i32.nxv1i8(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv1i8(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 1 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8i32_nxv1i8(<vscale x 8 x i32> %val, i32* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8i32.nxv1i8(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8i32_nxv1i8(<vscale x 8 x i32> %val, i32* %base, <vscale x 1 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv1i8(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 1 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8i32.nxv2i8(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv2i8(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 2 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8i32_nxv2i8(<vscale x 8 x i32> %val, i32* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8i32.nxv2i8(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8i32_nxv2i8(<vscale x 8 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv2i8(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i32(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 8 x i32>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i32(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 8 x i32>, <vscale x 8 x i1>, i64)
 
@@ -56814,159 +10248,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv8i32.nxv32i8(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv32i8(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 32 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8i32_nxv32i8(<vscale x 8 x i32> %val, i32* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v28, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v28
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8i32.nxv32i8(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8i32_nxv32i8(<vscale x 8 x i32> %val, i32* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v28, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v28, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv32i8(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8i32.nxv16i32(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv16i32(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 16 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8i32_nxv16i32(<vscale x 8 x i32> %val, i32* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8i32.nxv16i32(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8i32_nxv16i32(<vscale x 8 x i32> %val, i32* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv16i32(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8i32.nxv2i16(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv2i16(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 2 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8i32_nxv2i16(<vscale x 8 x i32> %val, i32* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8i32.nxv2i16(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8i32_nxv2i16(<vscale x 8 x i32> %val, i32* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv2i16(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8i32.nxv2i64(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv2i64(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 2 x i64>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8i32_nxv2i64(<vscale x 8 x i32> %val, i32* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv2r.v v26, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v26
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8i32.nxv2i64(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8i32_nxv2i64(<vscale x 8 x i32> %val, i32* %base, <vscale x 2 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv2r.v v26, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v26, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv2i64(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 2 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv32i8.nxv16i16(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv16i16(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 16 x i16>, <vscale x 32 x i1>, i64)
-
-define void @test_vsuxseg2_nxv32i8_nxv16i16(<vscale x 32 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv32i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v28, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v28
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv32i8.nxv16i16(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv32i8_nxv16i16(<vscale x 32 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 32 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v28, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v28, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv16i16(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 32 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv32i8.nxv32i16(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 32 x i16>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i16(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 32 x i16>, <vscale x 32 x i1>, i64)
 
@@ -56996,498 +10277,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv32i8.nxv4i32(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv4i32(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 4 x i32>, <vscale x 32 x i1>, i64)
-
-define void @test_vsuxseg2_nxv32i8_nxv4i32(<vscale x 32 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv32i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv2r.v v26, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v26
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv32i8.nxv4i32(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv32i8_nxv4i32(<vscale x 32 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 32 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv2r.v v26, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v26, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv4i32(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 32 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv32i8.nxv16i8(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv16i8(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 16 x i8>, <vscale x 32 x i1>, i64)
-
-define void @test_vsuxseg2_nxv32i8_nxv16i8(<vscale x 32 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv32i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv2r.v v26, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v26
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv32i8.nxv16i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv32i8_nxv16i8(<vscale x 32 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 32 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv2r.v v26, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v26, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv16i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 32 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv32i8.nxv1i64(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv1i64(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 1 x i64>, <vscale x 32 x i1>, i64)
-
-define void @test_vsuxseg2_nxv32i8_nxv1i64(<vscale x 32 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv32i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv32i8.nxv1i64(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv32i8_nxv1i64(<vscale x 32 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 32 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv1i64(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 32 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv32i8.nxv1i32(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv1i32(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 1 x i32>, <vscale x 32 x i1>, i64)
-
-define void @test_vsuxseg2_nxv32i8_nxv1i32(<vscale x 32 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv32i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv32i8.nxv1i32(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv32i8_nxv1i32(<vscale x 32 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 32 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv1i32(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 32 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv32i8.nxv8i16(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv8i16(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 8 x i16>, <vscale x 32 x i1>, i64)
-
-define void @test_vsuxseg2_nxv32i8_nxv8i16(<vscale x 32 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv32i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv2r.v v26, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v26
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv32i8.nxv8i16(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv32i8_nxv8i16(<vscale x 32 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 32 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv2r.v v26, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v26, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv8i16(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 32 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv32i8.nxv4i8(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv4i8(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 4 x i8>, <vscale x 32 x i1>, i64)
-
-define void @test_vsuxseg2_nxv32i8_nxv4i8(<vscale x 32 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv32i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv32i8.nxv4i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv32i8_nxv4i8(<vscale x 32 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 32 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv4i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 32 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv32i8.nxv1i16(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv1i16(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 1 x i16>, <vscale x 32 x i1>, i64)
-
-define void @test_vsuxseg2_nxv32i8_nxv1i16(<vscale x 32 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv32i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv32i8.nxv1i16(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv32i8_nxv1i16(<vscale x 32 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 32 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv1i16(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 32 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv32i8.nxv2i32(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv2i32(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 2 x i32>, <vscale x 32 x i1>, i64)
-
-define void @test_vsuxseg2_nxv32i8_nxv2i32(<vscale x 32 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv32i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv32i8.nxv2i32(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv32i8_nxv2i32(<vscale x 32 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 32 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv2i32(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 32 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv32i8.nxv8i8(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv8i8(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 8 x i8>, <vscale x 32 x i1>, i64)
-
-define void @test_vsuxseg2_nxv32i8_nxv8i8(<vscale x 32 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv32i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv32i8.nxv8i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv32i8_nxv8i8(<vscale x 32 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 32 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv8i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 32 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv32i8.nxv4i64(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv4i64(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 4 x i64>, <vscale x 32 x i1>, i64)
-
-define void @test_vsuxseg2_nxv32i8_nxv4i64(<vscale x 32 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv32i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v28, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v28
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv32i8.nxv4i64(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv32i8_nxv4i64(<vscale x 32 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 32 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v28, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v28, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv4i64(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 32 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv32i8.nxv64i8(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv64i8(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 64 x i8>, <vscale x 32 x i1>, i64)
-
-define void @test_vsuxseg2_nxv32i8_nxv64i8(<vscale x 32 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv32i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv32i8.nxv64i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv32i8_nxv64i8(<vscale x 32 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 32 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv64i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 64 x i8> %index, <vscale x 32 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv32i8.nxv4i16(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv4i16(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 4 x i16>, <vscale x 32 x i1>, i64)
-
-define void @test_vsuxseg2_nxv32i8_nxv4i16(<vscale x 32 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv32i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv32i8.nxv4i16(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv32i8_nxv4i16(<vscale x 32 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 32 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv4i16(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 32 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv32i8.nxv8i64(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv8i64(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 8 x i64>, <vscale x 32 x i1>, i64)
-
-define void @test_vsuxseg2_nxv32i8_nxv8i64(<vscale x 32 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv32i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv32i8.nxv8i64(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv32i8_nxv8i64(<vscale x 32 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 32 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv8i64(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 32 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv32i8.nxv1i8(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv1i8(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 1 x i8>, <vscale x 32 x i1>, i64)
-
-define void @test_vsuxseg2_nxv32i8_nxv1i8(<vscale x 32 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv32i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv32i8.nxv1i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv32i8_nxv1i8(<vscale x 32 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 32 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv1i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 32 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv32i8.nxv2i8(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv2i8(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 2 x i8>, <vscale x 32 x i1>, i64)
-
-define void @test_vsuxseg2_nxv32i8_nxv2i8(<vscale x 32 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv32i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv32i8.nxv2i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv32i8_nxv2i8(<vscale x 32 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 32 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv2i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 32 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv32i8.nxv8i32(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv8i32(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 8 x i32>, <vscale x 32 x i1>, i64)
-
-define void @test_vsuxseg2_nxv32i8_nxv8i32(<vscale x 32 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv32i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v28, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v28
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv32i8.nxv8i32(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv32i8_nxv8i32(<vscale x 32 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 32 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v28, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v28, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv8i32(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 32 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv32i8.nxv32i8(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 32 x i8>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i8(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 32 x i8>, <vscale x 32 x i1>, i64)
 
@@ -57519,366 +10308,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv32i8.nxv16i32(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv16i32(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 16 x i32>, <vscale x 32 x i1>, i64)
-
-define void @test_vsuxseg2_nxv32i8_nxv16i32(<vscale x 32 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv32i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv32i8.nxv16i32(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv32i8_nxv16i32(<vscale x 32 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 32 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv16i32(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 32 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv32i8.nxv2i16(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv2i16(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 2 x i16>, <vscale x 32 x i1>, i64)
-
-define void @test_vsuxseg2_nxv32i8_nxv2i16(<vscale x 32 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv32i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv32i8.nxv2i16(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv32i8_nxv2i16(<vscale x 32 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 32 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv2i16(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 32 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv32i8.nxv2i64(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv2i64(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 2 x i64>, <vscale x 32 x i1>, i64)
-
-define void @test_vsuxseg2_nxv32i8_nxv2i64(<vscale x 32 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv32i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv2r.v v26, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v26
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv32i8.nxv2i64(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv32i8_nxv2i64(<vscale x 32 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 32 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv2r.v v26, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v26, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv2i64(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 32 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i16.nxv16i16(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv16i16(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i16_nxv16i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i16.nxv16i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i16_nxv16i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv16i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i16.nxv32i16(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv32i16(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 32 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i16_nxv32i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i16.nxv32i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i16_nxv32i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv32i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i16.nxv4i32(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv4i32(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i16_nxv4i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i16.nxv4i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i16_nxv4i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv4i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i16.nxv16i8(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv16i8(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i16_nxv16i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i16.nxv16i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i16_nxv16i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv16i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i16.nxv1i64(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv1i64(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i16_nxv1i64(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i16.nxv1i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i16_nxv1i64(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv1i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i16.nxv1i32(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv1i32(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i16_nxv1i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i16.nxv1i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i16_nxv1i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv1i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i16.nxv8i16(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv8i16(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i16_nxv8i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i16.nxv8i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i16_nxv8i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv8i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i16.nxv4i8(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv4i8(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i16_nxv4i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i16.nxv4i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i16_nxv4i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv4i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i16.nxv1i16(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv1i16(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i16_nxv1i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i16.nxv1i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i16_nxv1i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv1i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i32>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
 
@@ -57910,186 +10339,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv2i16.nxv8i8(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv8i8(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i16_nxv8i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i16.nxv8i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i16_nxv8i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv8i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i16.nxv4i64(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv4i64(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i16_nxv4i64(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i16.nxv4i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i16_nxv4i64(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv4i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i16.nxv64i8(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv64i8(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 64 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i16_nxv64i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i16.nxv64i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i16_nxv64i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv64i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i16.nxv4i16(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv4i16(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i16_nxv4i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i16.nxv4i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i16_nxv4i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv4i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i16.nxv8i64(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv8i64(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i16_nxv8i64(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i16.nxv8i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i16_nxv8i64(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv8i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i16.nxv1i8(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv1i8(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i16_nxv1i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i16.nxv1i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i16_nxv1i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv1i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i8>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i8>, <vscale x 2 x i1>, i64)
 
@@ -58121,93 +10370,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv2i16.nxv8i32(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv8i32(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i16_nxv8i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i16.nxv8i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i16_nxv8i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv8i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i16.nxv32i8(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv32i8(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 32 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i16_nxv32i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i16.nxv32i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i16_nxv32i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv32i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i16.nxv16i32(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv16i32(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i16_nxv16i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i16.nxv16i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i16_nxv16i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv16i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i16>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64)
 
@@ -58268,285 +10430,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv2i16.nxv16i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv16i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i16_nxv16i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv16i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i16_nxv16i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv16i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i16.nxv32i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv32i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 32 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i16_nxv32i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv32i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i16_nxv32i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv32i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i16.nxv4i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv4i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i16_nxv4i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv4i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i16_nxv4i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv4i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i16.nxv16i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv16i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i16_nxv16i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv16i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i16_nxv16i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv16i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i16.nxv1i64(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv1i64(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i16_nxv1i64(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv1i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i16_nxv1i64(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv1i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i16.nxv1i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv1i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i16_nxv1i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv1i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i16_nxv1i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv1i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i16.nxv8i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv8i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i16_nxv8i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv8i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i16_nxv8i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv8i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i16.nxv4i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv4i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i16_nxv4i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv4i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i16_nxv4i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv4i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i16.nxv1i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv1i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i16_nxv1i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv1i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i16_nxv1i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv1i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i32>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
 
@@ -58578,192 +10461,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv2i16.nxv8i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv8i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i16_nxv8i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv8i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i16_nxv8i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv8i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i16.nxv4i64(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv4i64(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i16_nxv4i64(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv4i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i16_nxv4i64(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv4i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i16.nxv64i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv64i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 64 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i16_nxv64i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv64i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i16_nxv64i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv64i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i16.nxv4i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv4i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i16_nxv4i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv4i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i16_nxv4i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv4i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i16.nxv8i64(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv8i64(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i16_nxv8i64(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv8i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i16_nxv8i64(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv8i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i16.nxv1i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv1i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i16_nxv1i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv1i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i16_nxv1i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv1i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i8>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i8>, <vscale x 2 x i1>, i64)
 
@@ -58795,99 +10492,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv2i16.nxv8i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv8i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i16_nxv8i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv8i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i16_nxv8i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv8i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i16.nxv32i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv32i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 32 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i16_nxv32i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv32i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i16_nxv32i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv32i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i16.nxv16i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv16i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i16_nxv16i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv16i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i16_nxv16i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv16i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i16>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64)
 
@@ -58950,303 +10554,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv2i16.nxv16i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv16i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i16_nxv16i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i16.nxv16i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i16_nxv16i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv16i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i16.nxv32i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv32i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 32 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i16_nxv32i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i16.nxv32i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i16_nxv32i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv32i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i16.nxv4i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv4i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i16_nxv4i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i16.nxv4i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i16_nxv4i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv4i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i16.nxv16i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv16i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i16_nxv16i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i16.nxv16i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i16_nxv16i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv16i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i16.nxv1i64(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv1i64(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i16_nxv1i64(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i16.nxv1i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i16_nxv1i64(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv1i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i16.nxv1i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv1i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i16_nxv1i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i16.nxv1i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i16_nxv1i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv1i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i16.nxv8i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv8i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i16_nxv8i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i16.nxv8i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i16_nxv8i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv8i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i16.nxv4i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv4i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i16_nxv4i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i16.nxv4i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i16_nxv4i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv4i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i16.nxv1i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv1i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i16_nxv1i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i16.nxv1i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i16_nxv1i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv1i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i32>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
 
@@ -59280,204 +10587,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv2i16.nxv8i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv8i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i16_nxv8i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i16.nxv8i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i16_nxv8i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv8i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i16.nxv4i64(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv4i64(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i16_nxv4i64(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i16.nxv4i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i16_nxv4i64(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv4i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i16.nxv64i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv64i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 64 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i16_nxv64i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i16.nxv64i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i16_nxv64i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv64i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i16.nxv4i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv4i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i16_nxv4i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i16.nxv4i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i16_nxv4i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv4i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i16.nxv8i64(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv8i64(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i16_nxv8i64(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i16.nxv8i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i16_nxv8i64(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv8i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i16.nxv1i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv1i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i16_nxv1i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i16.nxv1i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i16_nxv1i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv1i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i8>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i8>, <vscale x 2 x i1>, i64)
 
@@ -59511,105 +10620,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv2i16.nxv8i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv8i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i16_nxv8i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i16.nxv8i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i16_nxv8i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv8i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i16.nxv32i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv32i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 32 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i16_nxv32i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i16.nxv32i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i16_nxv32i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv32i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i16.nxv16i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv16i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i16_nxv16i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i16.nxv16i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i16_nxv16i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv16i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i16>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64)
 
@@ -59676,321 +10686,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg5.nxv2i16.nxv16i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv16i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2i16_nxv16i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2i16.nxv16i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2i16_nxv16i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv16i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2i16.nxv32i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv32i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 32 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2i16_nxv32i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2i16.nxv32i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2i16_nxv32i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv32i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2i16.nxv4i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv4i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2i16_nxv4i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2i16.nxv4i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2i16_nxv4i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv4i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2i16.nxv16i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv16i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2i16_nxv16i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2i16.nxv16i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2i16_nxv16i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv16i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2i16.nxv1i64(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv1i64(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2i16_nxv1i64(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2i16.nxv1i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2i16_nxv1i64(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv1i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2i16.nxv1i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv1i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2i16_nxv1i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2i16.nxv1i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2i16_nxv1i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv1i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2i16.nxv8i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv8i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2i16_nxv8i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2i16.nxv8i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2i16_nxv8i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv8i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2i16.nxv4i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv4i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2i16_nxv4i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2i16.nxv4i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2i16_nxv4i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv4i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2i16.nxv1i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv1i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2i16_nxv1i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2i16.nxv1i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2i16_nxv1i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv1i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i32>, i64)
 declare void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
 
@@ -60026,216 +10721,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg5.nxv2i16.nxv8i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv8i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2i16_nxv8i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2i16.nxv8i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2i16_nxv8i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv8i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2i16.nxv4i64(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv4i64(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2i16_nxv4i64(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2i16.nxv4i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2i16_nxv4i64(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv4i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2i16.nxv64i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv64i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 64 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2i16_nxv64i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2i16.nxv64i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2i16_nxv64i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv64i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2i16.nxv4i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv4i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2i16_nxv4i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2i16.nxv4i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2i16_nxv4i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv4i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2i16.nxv8i64(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv8i64(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2i16_nxv8i64(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2i16.nxv8i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2i16_nxv8i64(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv8i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2i16.nxv1i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv1i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2i16_nxv1i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2i16.nxv1i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2i16_nxv1i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv1i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i8>, i64)
 declare void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i8>, <vscale x 2 x i1>, i64)
 
@@ -60271,111 +10756,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg5.nxv2i16.nxv8i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv8i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2i16_nxv8i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2i16.nxv8i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2i16_nxv8i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv8i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2i16.nxv32i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv32i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 32 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2i16_nxv32i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2i16.nxv32i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2i16_nxv32i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv32i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2i16.nxv16i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv16i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2i16_nxv16i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2i16.nxv16i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2i16_nxv16i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv16i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i16>, i64)
 declare void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64)
 
@@ -60446,339 +10826,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg6.nxv2i16.nxv16i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv16i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2i16_nxv16i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2i16.nxv16i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2i16_nxv16i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv16i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2i16.nxv32i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv32i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 32 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2i16_nxv32i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2i16.nxv32i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2i16_nxv32i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv32i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2i16.nxv4i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv4i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2i16_nxv4i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2i16.nxv4i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2i16_nxv4i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv4i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2i16.nxv16i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv16i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2i16_nxv16i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2i16.nxv16i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2i16_nxv16i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv16i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2i16.nxv1i64(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv1i64(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2i16_nxv1i64(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2i16.nxv1i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2i16_nxv1i64(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv1i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2i16.nxv1i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv1i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2i16_nxv1i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2i16.nxv1i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2i16_nxv1i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv1i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2i16.nxv8i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv8i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2i16_nxv8i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2i16.nxv8i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2i16_nxv8i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv8i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2i16.nxv4i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv4i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2i16_nxv4i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2i16.nxv4i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2i16_nxv4i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv4i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2i16.nxv1i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv1i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2i16_nxv1i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2i16.nxv1i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2i16_nxv1i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv1i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i32>, i64)
 declare void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
 
@@ -60816,228 +10863,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg6.nxv2i16.nxv8i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv8i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2i16_nxv8i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2i16.nxv8i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2i16_nxv8i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv8i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2i16.nxv4i64(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv4i64(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2i16_nxv4i64(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2i16.nxv4i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2i16_nxv4i64(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv4i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2i16.nxv64i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv64i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 64 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2i16_nxv64i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2i16.nxv64i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2i16_nxv64i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv64i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2i16.nxv4i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv4i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2i16_nxv4i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2i16.nxv4i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2i16_nxv4i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv4i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2i16.nxv8i64(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv8i64(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2i16_nxv8i64(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2i16.nxv8i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2i16_nxv8i64(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv8i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2i16.nxv1i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv1i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2i16_nxv1i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2i16.nxv1i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2i16_nxv1i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv1i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i8>, i64)
 declare void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i8>, <vscale x 2 x i1>, i64)
 
@@ -61075,117 +10900,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg6.nxv2i16.nxv8i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv8i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2i16_nxv8i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2i16.nxv8i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2i16_nxv8i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv8i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2i16.nxv32i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv32i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 32 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2i16_nxv32i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2i16.nxv32i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2i16_nxv32i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv32i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2i16.nxv16i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv16i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2i16_nxv16i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2i16.nxv16i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2i16_nxv16i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv16i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i16>, i64)
 declare void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64)
 
@@ -61260,357 +10974,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg7.nxv2i16.nxv16i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv16i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2i16_nxv16i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2i16.nxv16i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2i16_nxv16i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv16i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2i16.nxv32i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv32i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 32 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2i16_nxv32i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2i16.nxv32i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2i16_nxv32i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv32i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2i16.nxv4i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv4i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2i16_nxv4i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2i16.nxv4i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2i16_nxv4i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv4i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2i16.nxv16i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv16i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2i16_nxv16i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2i16.nxv16i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2i16_nxv16i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv16i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2i16.nxv1i64(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv1i64(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2i16_nxv1i64(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2i16.nxv1i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2i16_nxv1i64(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv1i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2i16.nxv1i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv1i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2i16_nxv1i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2i16.nxv1i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2i16_nxv1i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv1i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2i16.nxv8i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv8i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2i16_nxv8i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2i16.nxv8i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2i16_nxv8i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv8i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2i16.nxv4i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv4i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2i16_nxv4i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2i16.nxv4i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2i16_nxv4i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv4i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2i16.nxv1i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv1i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2i16_nxv1i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2i16.nxv1i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2i16_nxv1i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv1i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i32>, i64)
 declare void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
 
@@ -61650,240 +11013,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg7.nxv2i16.nxv8i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv8i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2i16_nxv8i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2i16.nxv8i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2i16_nxv8i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv8i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2i16.nxv4i64(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv4i64(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2i16_nxv4i64(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2i16.nxv4i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2i16_nxv4i64(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv4i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2i16.nxv64i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv64i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 64 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2i16_nxv64i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2i16.nxv64i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2i16_nxv64i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv64i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2i16.nxv4i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv4i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2i16_nxv4i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2i16.nxv4i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2i16_nxv4i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv4i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2i16.nxv8i64(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv8i64(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2i16_nxv8i64(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2i16.nxv8i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2i16_nxv8i64(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv8i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2i16.nxv1i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv1i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2i16_nxv1i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2i16.nxv1i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2i16_nxv1i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv1i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i8>, i64)
 declare void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i8>, <vscale x 2 x i1>, i64)
 
@@ -61923,123 +11052,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg7.nxv2i16.nxv8i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv8i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2i16_nxv8i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2i16.nxv8i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2i16_nxv8i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv8i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2i16.nxv32i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv32i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 32 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2i16_nxv32i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2i16.nxv32i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2i16_nxv32i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv32i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2i16.nxv16i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv16i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2i16_nxv16i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2i16.nxv16i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2i16_nxv16i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv16i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i16>, i64)
 declare void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64)
 
@@ -62118,375 +11130,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg8.nxv2i16.nxv16i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv16i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2i16_nxv16i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2i16.nxv16i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2i16_nxv16i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv16i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2i16.nxv32i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv32i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 32 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2i16_nxv32i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2i16.nxv32i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2i16_nxv32i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv32i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2i16.nxv4i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv4i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2i16_nxv4i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2i16.nxv4i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2i16_nxv4i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv4i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2i16.nxv16i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv16i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2i16_nxv16i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2i16.nxv16i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2i16_nxv16i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv16i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2i16.nxv1i64(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv1i64(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2i16_nxv1i64(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2i16.nxv1i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2i16_nxv1i64(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv1i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2i16.nxv1i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv1i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2i16_nxv1i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2i16.nxv1i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2i16_nxv1i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv1i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2i16.nxv8i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv8i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2i16_nxv8i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2i16.nxv8i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2i16_nxv8i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv8i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2i16.nxv4i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv4i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2i16_nxv4i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2i16.nxv4i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2i16_nxv4i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv4i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2i16.nxv1i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv1i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2i16_nxv1i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2i16.nxv1i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2i16_nxv1i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv1i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i32>, i64)
 declare void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
 
@@ -62528,252 +11171,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg8.nxv2i16.nxv8i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv8i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2i16_nxv8i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2i16.nxv8i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2i16_nxv8i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv8i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2i16.nxv4i64(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv4i64(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2i16_nxv4i64(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2i16.nxv4i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2i16_nxv4i64(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv4i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2i16.nxv64i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv64i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 64 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2i16_nxv64i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2i16.nxv64i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2i16_nxv64i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv64i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2i16.nxv4i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv4i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2i16_nxv4i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2i16.nxv4i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2i16_nxv4i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv4i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2i16.nxv8i64(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv8i64(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2i16_nxv8i64(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2i16.nxv8i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2i16_nxv8i64(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv8i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2i16.nxv1i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv1i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2i16_nxv1i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2i16.nxv1i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2i16_nxv1i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv1i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i8>, i64)
 declare void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i8>, <vscale x 2 x i1>, i64)
 
@@ -62815,129 +11212,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg8.nxv2i16.nxv8i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv8i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2i16_nxv8i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2i16.nxv8i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2i16_nxv8i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv8i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2i16.nxv32i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv32i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 32 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2i16_nxv32i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2i16.nxv32i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2i16_nxv32i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv32i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2i16.nxv16i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv16i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2i16_nxv16i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2i16.nxv16i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2i16_nxv16i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv16i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i16>, i64)
 declare void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64)
 
@@ -63020,281 +11294,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv2i64.nxv16i16(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv16i16(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 16 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i64_nxv16i16(<vscale x 2 x i64> %val, i64* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i64.nxv16i16(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i64_nxv16i16(<vscale x 2 x i64> %val, i64* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv16i16(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i64.nxv32i16(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv32i16(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 32 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i64_nxv32i16(<vscale x 2 x i64> %val, i64* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i64.nxv32i16(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i64_nxv32i16(<vscale x 2 x i64> %val, i64* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv32i16(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i64.nxv4i32(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv4i32(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 4 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i64_nxv4i32(<vscale x 2 x i64> %val, i64* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v26, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v26
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i64.nxv4i32(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i64_nxv4i32(<vscale x 2 x i64> %val, i64* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v26, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v26, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv4i32(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i64.nxv16i8(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv16i8(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 16 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i64_nxv16i8(<vscale x 2 x i64> %val, i64* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v26, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v26
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i64.nxv16i8(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i64_nxv16i8(<vscale x 2 x i64> %val, i64* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v26, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v26, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv16i8(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i64.nxv1i64(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv1i64(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 1 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i64_nxv1i64(<vscale x 2 x i64> %val, i64* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i64.nxv1i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i64_nxv1i64(<vscale x 2 x i64> %val, i64* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv1i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i64.nxv1i32(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv1i32(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 1 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i64_nxv1i32(<vscale x 2 x i64> %val, i64* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i64.nxv1i32(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i64_nxv1i32(<vscale x 2 x i64> %val, i64* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv1i32(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i64.nxv8i16(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv8i16(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 8 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i64_nxv8i16(<vscale x 2 x i64> %val, i64* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v26, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v26
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i64.nxv8i16(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i64_nxv8i16(<vscale x 2 x i64> %val, i64* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v26, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v26, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv8i16(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i64.nxv4i8(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv4i8(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 4 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i64_nxv4i8(<vscale x 2 x i64> %val, i64* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i64.nxv4i8(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i64_nxv4i8(<vscale x 2 x i64> %val, i64* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv4i8(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i64.nxv1i16(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv1i16(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 1 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i64_nxv1i16(<vscale x 2 x i64> %val, i64* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i64.nxv1i16(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i64_nxv1i16(<vscale x 2 x i64> %val, i64* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv1i16(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i32(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 2 x i32>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i32(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
 
@@ -63326,186 +11325,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv2i64.nxv8i8(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv8i8(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 8 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i64_nxv8i8(<vscale x 2 x i64> %val, i64* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i64.nxv8i8(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i64_nxv8i8(<vscale x 2 x i64> %val, i64* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv8i8(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i64.nxv4i64(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv4i64(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 4 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i64_nxv4i64(<vscale x 2 x i64> %val, i64* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i64.nxv4i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i64_nxv4i64(<vscale x 2 x i64> %val, i64* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv4i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i64.nxv64i8(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv64i8(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 64 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i64_nxv64i8(<vscale x 2 x i64> %val, i64* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i64.nxv64i8(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i64_nxv64i8(<vscale x 2 x i64> %val, i64* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv64i8(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i64.nxv4i16(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv4i16(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 4 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i64_nxv4i16(<vscale x 2 x i64> %val, i64* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i64.nxv4i16(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i64_nxv4i16(<vscale x 2 x i64> %val, i64* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv4i16(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i64.nxv8i64(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv8i64(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 8 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i64_nxv8i64(<vscale x 2 x i64> %val, i64* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i64.nxv8i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i64_nxv8i64(<vscale x 2 x i64> %val, i64* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv8i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i64.nxv1i8(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv1i8(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 1 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i64_nxv1i8(<vscale x 2 x i64> %val, i64* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i64.nxv1i8(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i64_nxv1i8(<vscale x 2 x i64> %val, i64* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv1i8(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i8(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 2 x i8>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i8(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 2 x i8>, <vscale x 2 x i1>, i64)
 
@@ -63537,93 +11356,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv2i64.nxv8i32(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv8i32(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 8 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i64_nxv8i32(<vscale x 2 x i64> %val, i64* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i64.nxv8i32(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i64_nxv8i32(<vscale x 2 x i64> %val, i64* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv8i32(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i64.nxv32i8(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv32i8(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 32 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i64_nxv32i8(<vscale x 2 x i64> %val, i64* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i64.nxv32i8(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i64_nxv32i8(<vscale x 2 x i64> %val, i64* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv32i8(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2i64.nxv16i32(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv16i32(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 16 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2i64_nxv16i32(<vscale x 2 x i64> %val, i64* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2i64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2i64.nxv16i32(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2i64_nxv16i32(<vscale x 2 x i64> %val, i64* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2i64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv16i32(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i16(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 2 x i16>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i16(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64)
 
@@ -63686,285 +11418,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv2i64.nxv16i16(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv16i16(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 16 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i64_nxv16i16(<vscale x 2 x i64> %val, i64* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i64.nxv16i16(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i64_nxv16i16(<vscale x 2 x i64> %val, i64* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v2, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv16i16(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i64.nxv32i16(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv32i16(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 32 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i64_nxv32i16(<vscale x 2 x i64> %val, i64* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i64.nxv32i16(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i64_nxv32i16(<vscale x 2 x i64> %val, i64* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv32i16(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i64.nxv4i32(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv4i32(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 4 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i64_nxv4i32(<vscale x 2 x i64> %val, i64* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i64.nxv4i32(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i64_nxv4i32(<vscale x 2 x i64> %val, i64* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv4i32(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i64.nxv16i8(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv16i8(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 16 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i64_nxv16i8(<vscale x 2 x i64> %val, i64* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i64.nxv16i8(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i64_nxv16i8(<vscale x 2 x i64> %val, i64* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv16i8(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i64.nxv1i64(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv1i64(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 1 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i64_nxv1i64(<vscale x 2 x i64> %val, i64* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i64.nxv1i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i64_nxv1i64(<vscale x 2 x i64> %val, i64* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv1i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i64.nxv1i32(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv1i32(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 1 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i64_nxv1i32(<vscale x 2 x i64> %val, i64* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i64.nxv1i32(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i64_nxv1i32(<vscale x 2 x i64> %val, i64* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv1i32(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i64.nxv8i16(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv8i16(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 8 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i64_nxv8i16(<vscale x 2 x i64> %val, i64* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i64.nxv8i16(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i64_nxv8i16(<vscale x 2 x i64> %val, i64* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv8i16(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i64.nxv4i8(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv4i8(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 4 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i64_nxv4i8(<vscale x 2 x i64> %val, i64* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i64.nxv4i8(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i64_nxv4i8(<vscale x 2 x i64> %val, i64* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv4i8(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i64.nxv1i16(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv1i16(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 1 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i64_nxv1i16(<vscale x 2 x i64> %val, i64* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i64.nxv1i16(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i64_nxv1i16(<vscale x 2 x i64> %val, i64* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv1i16(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i32(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 2 x i32>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i32(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
 
@@ -63996,192 +11449,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv2i64.nxv8i8(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv8i8(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 8 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i64_nxv8i8(<vscale x 2 x i64> %val, i64* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i64.nxv8i8(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i64_nxv8i8(<vscale x 2 x i64> %val, i64* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv8i8(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i64.nxv4i64(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv4i64(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 4 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i64_nxv4i64(<vscale x 2 x i64> %val, i64* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i64.nxv4i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i64_nxv4i64(<vscale x 2 x i64> %val, i64* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v2, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv4i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i64.nxv64i8(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv64i8(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 64 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i64_nxv64i8(<vscale x 2 x i64> %val, i64* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i64.nxv64i8(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i64_nxv64i8(<vscale x 2 x i64> %val, i64* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv64i8(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i64.nxv4i16(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv4i16(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 4 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i64_nxv4i16(<vscale x 2 x i64> %val, i64* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i64.nxv4i16(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i64_nxv4i16(<vscale x 2 x i64> %val, i64* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv4i16(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i64.nxv8i64(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv8i64(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 8 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i64_nxv8i64(<vscale x 2 x i64> %val, i64* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i64.nxv8i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i64_nxv8i64(<vscale x 2 x i64> %val, i64* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv8i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i64.nxv1i8(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv1i8(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 1 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i64_nxv1i8(<vscale x 2 x i64> %val, i64* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i64.nxv1i8(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i64_nxv1i8(<vscale x 2 x i64> %val, i64* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv1i8(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i8(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 2 x i8>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i8(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 2 x i8>, <vscale x 2 x i1>, i64)
 
@@ -64213,99 +11480,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv2i64.nxv8i32(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv8i32(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 8 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i64_nxv8i32(<vscale x 2 x i64> %val, i64* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i64.nxv8i32(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i64_nxv8i32(<vscale x 2 x i64> %val, i64* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v2, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv8i32(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i64.nxv32i8(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv32i8(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 32 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i64_nxv32i8(<vscale x 2 x i64> %val, i64* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i64.nxv32i8(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i64_nxv32i8(<vscale x 2 x i64> %val, i64* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v2, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv32i8(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2i64.nxv16i32(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv16i32(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 16 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2i64_nxv16i32(<vscale x 2 x i64> %val, i64* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2i64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2i64.nxv16i32(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2i64_nxv16i32(<vscale x 2 x i64> %val, i64* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2i64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv16i32(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i16(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 2 x i16>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i16(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64)
 
@@ -64368,303 +11542,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv2i64.nxv16i16(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv16i16(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 16 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i64_nxv16i16(<vscale x 2 x i64> %val, i64* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i64.nxv16i16(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i64_nxv16i16(<vscale x 2 x i64> %val, i64* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v2, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv16i16(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i64.nxv32i16(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv32i16(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 32 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i64_nxv32i16(<vscale x 2 x i64> %val, i64* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i64.nxv32i16(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i64_nxv32i16(<vscale x 2 x i64> %val, i64* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv32i16(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i64.nxv4i32(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv4i32(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 4 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i64_nxv4i32(<vscale x 2 x i64> %val, i64* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i64.nxv4i32(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i64_nxv4i32(<vscale x 2 x i64> %val, i64* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv4i32(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i64.nxv16i8(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv16i8(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 16 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i64_nxv16i8(<vscale x 2 x i64> %val, i64* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i64.nxv16i8(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i64_nxv16i8(<vscale x 2 x i64> %val, i64* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv16i8(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i64.nxv1i64(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv1i64(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 1 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i64_nxv1i64(<vscale x 2 x i64> %val, i64* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i64.nxv1i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i64_nxv1i64(<vscale x 2 x i64> %val, i64* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv1i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i64.nxv1i32(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv1i32(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 1 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i64_nxv1i32(<vscale x 2 x i64> %val, i64* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i64.nxv1i32(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i64_nxv1i32(<vscale x 2 x i64> %val, i64* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv1i32(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i64.nxv8i16(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv8i16(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 8 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i64_nxv8i16(<vscale x 2 x i64> %val, i64* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i64.nxv8i16(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i64_nxv8i16(<vscale x 2 x i64> %val, i64* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv8i16(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i64.nxv4i8(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv4i8(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 4 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i64_nxv4i8(<vscale x 2 x i64> %val, i64* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i64.nxv4i8(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i64_nxv4i8(<vscale x 2 x i64> %val, i64* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv4i8(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i64.nxv1i16(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv1i16(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 1 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i64_nxv1i16(<vscale x 2 x i64> %val, i64* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i64.nxv1i16(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i64_nxv1i16(<vscale x 2 x i64> %val, i64* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv1i16(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i32(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 2 x i32>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i32(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
 
@@ -64698,204 +11575,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv2i64.nxv8i8(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv8i8(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 8 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i64_nxv8i8(<vscale x 2 x i64> %val, i64* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i64.nxv8i8(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i64_nxv8i8(<vscale x 2 x i64> %val, i64* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv8i8(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i64.nxv4i64(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv4i64(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 4 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i64_nxv4i64(<vscale x 2 x i64> %val, i64* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i64.nxv4i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i64_nxv4i64(<vscale x 2 x i64> %val, i64* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v2, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv4i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i64.nxv64i8(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv64i8(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 64 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i64_nxv64i8(<vscale x 2 x i64> %val, i64* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i64.nxv64i8(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i64_nxv64i8(<vscale x 2 x i64> %val, i64* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv64i8(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i64.nxv4i16(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv4i16(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 4 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i64_nxv4i16(<vscale x 2 x i64> %val, i64* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i64.nxv4i16(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i64_nxv4i16(<vscale x 2 x i64> %val, i64* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv4i16(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i64.nxv8i64(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv8i64(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 8 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i64_nxv8i64(<vscale x 2 x i64> %val, i64* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i64.nxv8i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i64_nxv8i64(<vscale x 2 x i64> %val, i64* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv8i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i64.nxv1i8(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv1i8(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 1 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i64_nxv1i8(<vscale x 2 x i64> %val, i64* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i64.nxv1i8(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i64_nxv1i8(<vscale x 2 x i64> %val, i64* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv1i8(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i8(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 2 x i8>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i8(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 2 x i8>, <vscale x 2 x i1>, i64)
 
@@ -64929,105 +11608,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv2i64.nxv8i32(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv8i32(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 8 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i64_nxv8i32(<vscale x 2 x i64> %val, i64* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i64.nxv8i32(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i64_nxv8i32(<vscale x 2 x i64> %val, i64* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v2, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv8i32(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i64.nxv32i8(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv32i8(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 32 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i64_nxv32i8(<vscale x 2 x i64> %val, i64* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i64.nxv32i8(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i64_nxv32i8(<vscale x 2 x i64> %val, i64* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v2, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv32i8(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2i64.nxv16i32(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv16i32(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 16 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2i64_nxv16i32(<vscale x 2 x i64> %val, i64* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2i64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2i64.nxv16i32(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2i64_nxv16i32(<vscale x 2 x i64> %val, i64* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2i64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv16i32(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i16(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 2 x i16>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i16(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64)
 
@@ -65125,66 +11705,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv16f16.nxv32i16(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv32i16(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 32 x i16>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg2_nxv16f16_nxv32i16(<vscale x 16 x half> %val, half* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16f16.nxv32i16(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv16f16_nxv32i16(<vscale x 16 x half> %val, half* %base, <vscale x 32 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv32i16(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 32 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv16f16.nxv4i32(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv4i32(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 4 x i32>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg2_nxv16f16_nxv4i32(<vscale x 16 x half> %val, half* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv2r.v v26, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v26
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16f16.nxv4i32(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv16f16_nxv4i32(<vscale x 16 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv2r.v v26, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v26, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv4i32(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i8(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 16 x i8>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i8(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 16 x i8>, <vscale x 16 x i1>, i64)
 
@@ -65216,467 +11736,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv16f16.nxv1i64(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv1i64(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 1 x i64>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg2_nxv16f16_nxv1i64(<vscale x 16 x half> %val, half* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16f16.nxv1i64(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv16f16_nxv1i64(<vscale x 16 x half> %val, half* %base, <vscale x 1 x i64> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv1i64(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 1 x i64> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv16f16.nxv1i32(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv1i32(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 1 x i32>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg2_nxv16f16_nxv1i32(<vscale x 16 x half> %val, half* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16f16.nxv1i32(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv16f16_nxv1i32(<vscale x 16 x half> %val, half* %base, <vscale x 1 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv1i32(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 1 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv16f16.nxv8i16(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv8i16(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 8 x i16>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg2_nxv16f16_nxv8i16(<vscale x 16 x half> %val, half* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv2r.v v26, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v26
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16f16.nxv8i16(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv16f16_nxv8i16(<vscale x 16 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv2r.v v26, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v26, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv8i16(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv16f16.nxv4i8(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv4i8(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 4 x i8>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg2_nxv16f16_nxv4i8(<vscale x 16 x half> %val, half* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16f16.nxv4i8(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv16f16_nxv4i8(<vscale x 16 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv4i8(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv16f16.nxv1i16(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv1i16(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 1 x i16>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg2_nxv16f16_nxv1i16(<vscale x 16 x half> %val, half* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16f16.nxv1i16(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv16f16_nxv1i16(<vscale x 16 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv1i16(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv16f16.nxv2i32(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv2i32(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 2 x i32>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg2_nxv16f16_nxv2i32(<vscale x 16 x half> %val, half* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16f16.nxv2i32(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv16f16_nxv2i32(<vscale x 16 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv2i32(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv16f16.nxv8i8(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv8i8(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 8 x i8>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg2_nxv16f16_nxv8i8(<vscale x 16 x half> %val, half* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16f16.nxv8i8(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv16f16_nxv8i8(<vscale x 16 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv8i8(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv16f16.nxv4i64(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv4i64(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 4 x i64>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg2_nxv16f16_nxv4i64(<vscale x 16 x half> %val, half* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v28, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v28
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16f16.nxv4i64(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv16f16_nxv4i64(<vscale x 16 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v28, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v28, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv4i64(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv16f16.nxv64i8(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv64i8(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 64 x i8>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg2_nxv16f16_nxv64i8(<vscale x 16 x half> %val, half* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16f16.nxv64i8(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv16f16_nxv64i8(<vscale x 16 x half> %val, half* %base, <vscale x 64 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv64i8(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 64 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv16f16.nxv4i16(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv4i16(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 4 x i16>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg2_nxv16f16_nxv4i16(<vscale x 16 x half> %val, half* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16f16.nxv4i16(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv16f16_nxv4i16(<vscale x 16 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv4i16(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv16f16.nxv8i64(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv8i64(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 8 x i64>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg2_nxv16f16_nxv8i64(<vscale x 16 x half> %val, half* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16f16.nxv8i64(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv16f16_nxv8i64(<vscale x 16 x half> %val, half* %base, <vscale x 8 x i64> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv8i64(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 8 x i64> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv16f16.nxv1i8(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv1i8(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 1 x i8>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg2_nxv16f16_nxv1i8(<vscale x 16 x half> %val, half* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16f16.nxv1i8(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv16f16_nxv1i8(<vscale x 16 x half> %val, half* %base, <vscale x 1 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv1i8(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 1 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv16f16.nxv2i8(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv2i8(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 2 x i8>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg2_nxv16f16_nxv2i8(<vscale x 16 x half> %val, half* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16f16.nxv2i8(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv16f16_nxv2i8(<vscale x 16 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv2i8(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv16f16.nxv8i32(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv8i32(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 8 x i32>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg2_nxv16f16_nxv8i32(<vscale x 16 x half> %val, half* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v28, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v28
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16f16.nxv8i32(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv16f16_nxv8i32(<vscale x 16 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v28, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v28, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv8i32(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv16f16.nxv32i8(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv32i8(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 32 x i8>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg2_nxv16f16_nxv32i8(<vscale x 16 x half> %val, half* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v28, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v28
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16f16.nxv32i8(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv16f16_nxv32i8(<vscale x 16 x half> %val, half* %base, <vscale x 32 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v28, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v28, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv32i8(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 32 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i32(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 16 x i32>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i32(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 16 x i32>, <vscale x 16 x i1>, i64)
 
@@ -65706,128 +11765,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv16f16.nxv2i16(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv2i16(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 2 x i16>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg2_nxv16f16_nxv2i16(<vscale x 16 x half> %val, half* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16f16.nxv2i16(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv16f16_nxv2i16(<vscale x 16 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv2i16(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv16f16.nxv2i64(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv2i64(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 2 x i64>, <vscale x 16 x i1>, i64)
-
-define void @test_vsuxseg2_nxv16f16_nxv2i64(<vscale x 16 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv2r.v v26, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v26
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv16f16.nxv2i64(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv16f16_nxv2i64(<vscale x 16 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 16 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv2r.v v26, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v26, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv2i64(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 16 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4f64.nxv16i16(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv16i16(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 16 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4f64_nxv16i16(<vscale x 4 x double> %val, double* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v28, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v28
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4f64.nxv16i16(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4f64_nxv16i16(<vscale x 4 x double> %val, double* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v28, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v28, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv16i16(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4f64.nxv32i16(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv32i16(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 32 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4f64_nxv32i16(<vscale x 4 x double> %val, double* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4f64.nxv32i16(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4f64_nxv32i16(<vscale x 4 x double> %val, double* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv32i16(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i32(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 4 x i32>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i32(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 4 x i32>, <vscale x 4 x i1>, i64)
 
@@ -65859,130 +11796,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv4f64.nxv16i8(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv16i8(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 16 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4f64_nxv16i8(<vscale x 4 x double> %val, double* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv2r.v v26, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v26
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4f64.nxv16i8(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4f64_nxv16i8(<vscale x 4 x double> %val, double* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv2r.v v26, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v26, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv16i8(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4f64.nxv1i64(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv1i64(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 1 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4f64_nxv1i64(<vscale x 4 x double> %val, double* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4f64.nxv1i64(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4f64_nxv1i64(<vscale x 4 x double> %val, double* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv1i64(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4f64.nxv1i32(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv1i32(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 1 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4f64_nxv1i32(<vscale x 4 x double> %val, double* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4f64.nxv1i32(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4f64_nxv1i32(<vscale x 4 x double> %val, double* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv1i32(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4f64.nxv8i16(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv8i16(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 8 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4f64_nxv8i16(<vscale x 4 x double> %val, double* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv2r.v v26, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v26
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4f64.nxv8i16(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4f64_nxv8i16(<vscale x 4 x double> %val, double* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv2r.v v26, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v26, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv8i16(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i8(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 4 x i8>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i8(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 4 x i8>, <vscale x 4 x i1>, i64)
 
@@ -66014,99 +11827,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv4f64.nxv1i16(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv1i16(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 1 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4f64_nxv1i16(<vscale x 4 x double> %val, double* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4f64.nxv1i16(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4f64_nxv1i16(<vscale x 4 x double> %val, double* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv1i16(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4f64.nxv2i32(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv2i32(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 2 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4f64_nxv2i32(<vscale x 4 x double> %val, double* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4f64.nxv2i32(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4f64_nxv2i32(<vscale x 4 x double> %val, double* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv2i32(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4f64.nxv8i8(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv8i8(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 8 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4f64_nxv8i8(<vscale x 4 x double> %val, double* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4f64.nxv8i8(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4f64_nxv8i8(<vscale x 4 x double> %val, double* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv8i8(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i64(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 4 x i64>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i64(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 4 x i64>, <vscale x 4 x i1>, i64)
 
@@ -66138,35 +11858,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv4f64.nxv64i8(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv64i8(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 64 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4f64_nxv64i8(<vscale x 4 x double> %val, double* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4f64.nxv64i8(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4f64_nxv64i8(<vscale x 4 x double> %val, double* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv64i8(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i16(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 4 x i16>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i16(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 4 x i16>, <vscale x 4 x i1>, i64)
 
@@ -66198,366 +11889,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv4f64.nxv8i64(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv8i64(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 8 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4f64_nxv8i64(<vscale x 4 x double> %val, double* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4f64.nxv8i64(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4f64_nxv8i64(<vscale x 4 x double> %val, double* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv8i64(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4f64.nxv1i8(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv1i8(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 1 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4f64_nxv1i8(<vscale x 4 x double> %val, double* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4f64.nxv1i8(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4f64_nxv1i8(<vscale x 4 x double> %val, double* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv1i8(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4f64.nxv2i8(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv2i8(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 2 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4f64_nxv2i8(<vscale x 4 x double> %val, double* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4f64.nxv2i8(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4f64_nxv2i8(<vscale x 4 x double> %val, double* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv2i8(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4f64.nxv8i32(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv8i32(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 8 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4f64_nxv8i32(<vscale x 4 x double> %val, double* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v28, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v28
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4f64.nxv8i32(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4f64_nxv8i32(<vscale x 4 x double> %val, double* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v28, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v28, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv8i32(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4f64.nxv32i8(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv32i8(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 32 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4f64_nxv32i8(<vscale x 4 x double> %val, double* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v28, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v28
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4f64.nxv32i8(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4f64_nxv32i8(<vscale x 4 x double> %val, double* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v28, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v28, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv32i8(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4f64.nxv16i32(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv16i32(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 16 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4f64_nxv16i32(<vscale x 4 x double> %val, double* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4f64.nxv16i32(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4f64_nxv16i32(<vscale x 4 x double> %val, double* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv16i32(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4f64.nxv2i16(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv2i16(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 2 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4f64_nxv2i16(<vscale x 4 x double> %val, double* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4f64.nxv2i16(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4f64_nxv2i16(<vscale x 4 x double> %val, double* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv2i16(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4f64.nxv2i64(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv2i64(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 2 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4f64_nxv2i64(<vscale x 4 x double> %val, double* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv2r.v v26, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v26
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4f64.nxv2i64(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4f64_nxv2i64(<vscale x 4 x double> %val, double* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv2r.v v26, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v26, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv2i64(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1f64.nxv16i16(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv16i16(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1f64_nxv16i16(<vscale x 1 x double> %val, double* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1f64.nxv16i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1f64_nxv16i16(<vscale x 1 x double> %val, double* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv16i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1f64.nxv32i16(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv32i16(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1f64_nxv32i16(<vscale x 1 x double> %val, double* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1f64.nxv32i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1f64_nxv32i16(<vscale x 1 x double> %val, double* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv32i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1f64.nxv4i32(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv4i32(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1f64_nxv4i32(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1f64.nxv4i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1f64_nxv4i32(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv4i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1f64.nxv16i8(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv16i8(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1f64_nxv16i8(<vscale x 1 x double> %val, double* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1f64.nxv16i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1f64_nxv16i8(<vscale x 1 x double> %val, double* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv16i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i64(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i64>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i64(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
 
@@ -66620,66 +11951,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv1f64.nxv8i16(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv8i16(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1f64_nxv8i16(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1f64.nxv8i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1f64_nxv8i16(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv8i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1f64.nxv4i8(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv4i8(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1f64_nxv4i8(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1f64.nxv4i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1f64_nxv4i8(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv4i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i16(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i16>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i16(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
 
@@ -66711,186 +11982,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv1f64.nxv2i32(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv2i32(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1f64_nxv2i32(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1f64.nxv2i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1f64_nxv2i32(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv2i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1f64.nxv8i8(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv8i8(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1f64_nxv8i8(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1f64.nxv8i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1f64_nxv8i8(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv8i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1f64.nxv4i64(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv4i64(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1f64_nxv4i64(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1f64.nxv4i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1f64_nxv4i64(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv4i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1f64.nxv64i8(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv64i8(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 64 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1f64_nxv64i8(<vscale x 1 x double> %val, double* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1f64.nxv64i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1f64_nxv64i8(<vscale x 1 x double> %val, double* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv64i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1f64.nxv4i16(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv4i16(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1f64_nxv4i16(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1f64.nxv4i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1f64_nxv4i16(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv4i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1f64.nxv8i64(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv8i64(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1f64_nxv8i64(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1f64.nxv8i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1f64_nxv8i64(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv8i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i8(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i8>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i8(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)
 
@@ -66922,308 +12013,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv1f64.nxv2i8(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv2i8(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1f64_nxv2i8(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1f64.nxv2i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1f64_nxv2i8(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv2i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1f64.nxv8i32(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv8i32(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1f64_nxv8i32(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1f64.nxv8i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1f64_nxv8i32(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv8i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1f64.nxv32i8(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv32i8(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1f64_nxv32i8(<vscale x 1 x double> %val, double* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1f64.nxv32i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1f64_nxv32i8(<vscale x 1 x double> %val, double* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv32i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1f64.nxv16i32(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv16i32(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1f64_nxv16i32(<vscale x 1 x double> %val, double* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1f64.nxv16i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1f64_nxv16i32(<vscale x 1 x double> %val, double* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv16i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1f64.nxv2i16(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv2i16(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1f64_nxv2i16(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1f64.nxv2i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1f64_nxv2i16(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv2i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1f64.nxv2i64(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv2i64(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1f64_nxv2i64(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1f64.nxv2i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1f64_nxv2i64(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv2i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1f64.nxv16i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv16i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1f64_nxv16i16(<vscale x 1 x double> %val, double* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1f64.nxv16i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1f64_nxv16i16(<vscale x 1 x double> %val, double* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv16i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1f64.nxv32i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv32i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1f64_nxv32i16(<vscale x 1 x double> %val, double* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1f64.nxv32i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1f64_nxv32i16(<vscale x 1 x double> %val, double* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv32i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1f64.nxv4i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv4i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1f64_nxv4i32(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1f64.nxv4i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1f64_nxv4i32(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv4i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1f64.nxv16i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv16i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1f64_nxv16i8(<vscale x 1 x double> %val, double* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1f64.nxv16i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1f64_nxv16i8(<vscale x 1 x double> %val, double* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv16i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i64>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
 
@@ -67286,68 +12075,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv1f64.nxv8i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv8i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1f64_nxv8i16(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1f64.nxv8i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1f64_nxv8i16(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv8i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1f64.nxv4i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv4i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1f64_nxv4i8(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1f64.nxv4i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1f64_nxv4i8(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv4i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i16>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
 
@@ -67379,192 +12106,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv1f64.nxv2i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv2i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1f64_nxv2i32(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1f64.nxv2i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1f64_nxv2i32(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv2i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1f64.nxv8i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv8i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1f64_nxv8i8(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1f64.nxv8i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1f64_nxv8i8(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv8i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1f64.nxv4i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv4i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1f64_nxv4i64(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1f64.nxv4i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1f64_nxv4i64(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv4i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1f64.nxv64i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv64i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 64 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1f64_nxv64i8(<vscale x 1 x double> %val, double* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1f64.nxv64i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1f64_nxv64i8(<vscale x 1 x double> %val, double* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv64i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1f64.nxv4i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv4i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1f64_nxv4i16(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1f64.nxv4i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1f64_nxv4i16(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv4i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1f64.nxv8i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv8i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1f64_nxv8i64(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1f64.nxv8i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1f64_nxv8i64(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv8i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i8>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)
 
@@ -67596,324 +12137,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv1f64.nxv2i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv2i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1f64_nxv2i8(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1f64.nxv2i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1f64_nxv2i8(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv2i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1f64.nxv8i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv8i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1f64_nxv8i32(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1f64.nxv8i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1f64_nxv8i32(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv8i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1f64.nxv32i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv32i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1f64_nxv32i8(<vscale x 1 x double> %val, double* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1f64.nxv32i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1f64_nxv32i8(<vscale x 1 x double> %val, double* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv32i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1f64.nxv16i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv16i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1f64_nxv16i32(<vscale x 1 x double> %val, double* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1f64.nxv16i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1f64_nxv16i32(<vscale x 1 x double> %val, double* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv16i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1f64.nxv2i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv2i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1f64_nxv2i16(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1f64.nxv2i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1f64_nxv2i16(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv2i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1f64.nxv2i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv2i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1f64_nxv2i64(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1f64.nxv2i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1f64_nxv2i64(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv2i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1f64.nxv16i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv16i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1f64_nxv16i16(<vscale x 1 x double> %val, double* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1f64.nxv16i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1f64_nxv16i16(<vscale x 1 x double> %val, double* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv16i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1f64.nxv32i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv32i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1f64_nxv32i16(<vscale x 1 x double> %val, double* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1f64.nxv32i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1f64_nxv32i16(<vscale x 1 x double> %val, double* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv32i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1f64.nxv4i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv4i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1f64_nxv4i32(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1f64.nxv4i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1f64_nxv4i32(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv4i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1f64.nxv16i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv16i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1f64_nxv16i8(<vscale x 1 x double> %val, double* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1f64.nxv16i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1f64_nxv16i8(<vscale x 1 x double> %val, double* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv16i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i64>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
 
@@ -67980,72 +12203,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv1f64.nxv8i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv8i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1f64_nxv8i16(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1f64.nxv8i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1f64_nxv8i16(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv8i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1f64.nxv4i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv4i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1f64_nxv4i8(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1f64.nxv4i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1f64_nxv4i8(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv4i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i16>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
 
@@ -68079,204 +12236,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv1f64.nxv2i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv2i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1f64_nxv2i32(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1f64.nxv2i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1f64_nxv2i32(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv2i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1f64.nxv8i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv8i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1f64_nxv8i8(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1f64.nxv8i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1f64_nxv8i8(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv8i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1f64.nxv4i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv4i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1f64_nxv4i64(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1f64.nxv4i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1f64_nxv4i64(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv4i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1f64.nxv64i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv64i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 64 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1f64_nxv64i8(<vscale x 1 x double> %val, double* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1f64.nxv64i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1f64_nxv64i8(<vscale x 1 x double> %val, double* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv64i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1f64.nxv4i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv4i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1f64_nxv4i16(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1f64.nxv4i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1f64_nxv4i16(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv4i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1f64.nxv8i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv8i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1f64_nxv8i64(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1f64.nxv8i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1f64_nxv8i64(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv8i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i8>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)
 
@@ -68310,344 +12269,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv1f64.nxv2i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv2i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1f64_nxv2i8(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1f64.nxv2i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1f64_nxv2i8(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv2i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1f64.nxv8i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv8i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1f64_nxv8i32(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1f64.nxv8i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1f64_nxv8i32(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv8i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1f64.nxv32i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv32i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1f64_nxv32i8(<vscale x 1 x double> %val, double* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1f64.nxv32i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1f64_nxv32i8(<vscale x 1 x double> %val, double* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv32i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1f64.nxv16i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv16i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1f64_nxv16i32(<vscale x 1 x double> %val, double* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1f64.nxv16i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1f64_nxv16i32(<vscale x 1 x double> %val, double* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv16i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1f64.nxv2i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv2i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1f64_nxv2i16(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1f64.nxv2i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1f64_nxv2i16(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv2i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1f64.nxv2i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv2i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1f64_nxv2i64(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1f64.nxv2i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1f64_nxv2i64(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv2i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1f64.nxv16i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv16i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1f64_nxv16i16(<vscale x 1 x double> %val, double* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1f64.nxv16i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1f64_nxv16i16(<vscale x 1 x double> %val, double* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv16i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1f64.nxv32i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv32i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1f64_nxv32i16(<vscale x 1 x double> %val, double* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1f64.nxv32i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1f64_nxv32i16(<vscale x 1 x double> %val, double* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv32i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1f64.nxv4i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv4i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1f64_nxv4i32(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1f64.nxv4i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1f64_nxv4i32(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv4i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1f64.nxv16i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv16i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1f64_nxv16i8(<vscale x 1 x double> %val, double* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1f64.nxv16i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1f64_nxv16i8(<vscale x 1 x double> %val, double* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv16i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i64>, i64)
 declare void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
 
@@ -68718,76 +12339,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg5.nxv1f64.nxv8i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv8i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1f64_nxv8i16(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1f64.nxv8i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1f64_nxv8i16(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv8i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1f64.nxv4i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv4i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1f64_nxv4i8(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1f64.nxv4i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1f64_nxv4i8(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv4i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i16>, i64)
 declare void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
 
@@ -68823,216 +12374,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg5.nxv1f64.nxv2i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv2i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1f64_nxv2i32(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1f64.nxv2i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1f64_nxv2i32(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv2i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1f64.nxv8i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv8i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1f64_nxv8i8(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1f64.nxv8i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1f64_nxv8i8(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv8i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1f64.nxv4i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv4i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1f64_nxv4i64(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1f64.nxv4i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1f64_nxv4i64(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv4i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1f64.nxv64i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv64i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 64 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1f64_nxv64i8(<vscale x 1 x double> %val, double* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1f64.nxv64i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1f64_nxv64i8(<vscale x 1 x double> %val, double* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv64i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1f64.nxv4i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv4i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1f64_nxv4i16(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1f64.nxv4i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1f64_nxv4i16(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv4i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1f64.nxv8i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv8i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1f64_nxv8i64(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1f64.nxv8i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1f64_nxv8i64(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv8i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i8>, i64)
 declare void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)
 
@@ -69068,364 +12409,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg5.nxv1f64.nxv2i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv2i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1f64_nxv2i8(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1f64.nxv2i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1f64_nxv2i8(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv2i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1f64.nxv8i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv8i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1f64_nxv8i32(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1f64.nxv8i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1f64_nxv8i32(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv8i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1f64.nxv32i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv32i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1f64_nxv32i8(<vscale x 1 x double> %val, double* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1f64.nxv32i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1f64_nxv32i8(<vscale x 1 x double> %val, double* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv32i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1f64.nxv16i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv16i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1f64_nxv16i32(<vscale x 1 x double> %val, double* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1f64.nxv16i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1f64_nxv16i32(<vscale x 1 x double> %val, double* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv16i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1f64.nxv2i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv2i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1f64_nxv2i16(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1f64.nxv2i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1f64_nxv2i16(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv2i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1f64.nxv2i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv2i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1f64_nxv2i64(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1f64.nxv2i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1f64_nxv2i64(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv2i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1f64.nxv16i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv16i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1f64_nxv16i16(<vscale x 1 x double> %val, double* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1f64.nxv16i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1f64_nxv16i16(<vscale x 1 x double> %val, double* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv16i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1f64.nxv32i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv32i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1f64_nxv32i16(<vscale x 1 x double> %val, double* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1f64.nxv32i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1f64_nxv32i16(<vscale x 1 x double> %val, double* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv32i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1f64.nxv4i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv4i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1f64_nxv4i32(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1f64.nxv4i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1f64_nxv4i32(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv4i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1f64.nxv16i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv16i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1f64_nxv16i8(<vscale x 1 x double> %val, double* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1f64.nxv16i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1f64_nxv16i8(<vscale x 1 x double> %val, double* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv16i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i64>, i64)
 declare void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
 
@@ -69500,80 +12483,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg6.nxv1f64.nxv8i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv8i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1f64_nxv8i16(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1f64.nxv8i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1f64_nxv8i16(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv8i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1f64.nxv4i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv4i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1f64_nxv4i8(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1f64.nxv4i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1f64_nxv4i8(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv4i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i16>, i64)
 declare void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
 
@@ -69611,228 +12520,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg6.nxv1f64.nxv2i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv2i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1f64_nxv2i32(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1f64.nxv2i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1f64_nxv2i32(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv2i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1f64.nxv8i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv8i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1f64_nxv8i8(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1f64.nxv8i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1f64_nxv8i8(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv8i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1f64.nxv4i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv4i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1f64_nxv4i64(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1f64.nxv4i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1f64_nxv4i64(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv4i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1f64.nxv64i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv64i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 64 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1f64_nxv64i8(<vscale x 1 x double> %val, double* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1f64.nxv64i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1f64_nxv64i8(<vscale x 1 x double> %val, double* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv64i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1f64.nxv4i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv4i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1f64_nxv4i16(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1f64.nxv4i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1f64_nxv4i16(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv4i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1f64.nxv8i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv8i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1f64_nxv8i64(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1f64.nxv8i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1f64_nxv8i64(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv8i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i8>, i64)
 declare void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)
 
@@ -69870,384 +12557,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg6.nxv1f64.nxv2i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv2i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1f64_nxv2i8(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1f64.nxv2i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1f64_nxv2i8(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv2i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1f64.nxv8i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv8i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1f64_nxv8i32(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1f64.nxv8i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1f64_nxv8i32(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv8i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1f64.nxv32i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv32i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1f64_nxv32i8(<vscale x 1 x double> %val, double* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1f64.nxv32i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1f64_nxv32i8(<vscale x 1 x double> %val, double* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv32i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1f64.nxv16i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv16i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1f64_nxv16i32(<vscale x 1 x double> %val, double* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1f64.nxv16i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1f64_nxv16i32(<vscale x 1 x double> %val, double* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv16i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1f64.nxv2i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv2i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1f64_nxv2i16(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1f64.nxv2i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1f64_nxv2i16(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv2i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1f64.nxv2i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv2i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1f64_nxv2i64(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1f64.nxv2i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1f64_nxv2i64(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv2i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1f64.nxv16i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv16i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1f64_nxv16i16(<vscale x 1 x double> %val, double* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1f64.nxv16i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1f64_nxv16i16(<vscale x 1 x double> %val, double* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv16i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1f64.nxv32i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv32i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1f64_nxv32i16(<vscale x 1 x double> %val, double* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1f64.nxv32i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1f64_nxv32i16(<vscale x 1 x double> %val, double* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv32i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1f64.nxv4i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv4i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1f64_nxv4i32(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1f64.nxv4i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1f64_nxv4i32(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv4i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1f64.nxv16i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv16i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1f64_nxv16i8(<vscale x 1 x double> %val, double* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1f64.nxv16i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1f64_nxv16i8(<vscale x 1 x double> %val, double* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv16i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i64>, i64)
 declare void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
 
@@ -70326,84 +12635,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg7.nxv1f64.nxv8i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv8i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1f64_nxv8i16(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1f64.nxv8i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1f64_nxv8i16(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv8i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1f64.nxv4i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv4i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1f64_nxv4i8(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1f64.nxv4i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1f64_nxv4i8(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv4i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i16>, i64)
 declare void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
 
@@ -70443,240 +12674,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg7.nxv1f64.nxv2i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv2i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1f64_nxv2i32(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1f64.nxv2i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1f64_nxv2i32(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv2i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1f64.nxv8i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv8i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1f64_nxv8i8(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1f64.nxv8i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1f64_nxv8i8(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv8i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1f64.nxv4i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv4i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1f64_nxv4i64(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1f64.nxv4i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1f64_nxv4i64(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv4i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1f64.nxv64i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv64i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 64 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1f64_nxv64i8(<vscale x 1 x double> %val, double* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1f64.nxv64i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1f64_nxv64i8(<vscale x 1 x double> %val, double* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv64i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1f64.nxv4i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv4i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1f64_nxv4i16(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1f64.nxv4i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1f64_nxv4i16(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv4i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1f64.nxv8i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv8i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1f64_nxv8i64(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1f64.nxv8i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1f64_nxv8i64(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv8i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i8>, i64)
 declare void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)
 
@@ -70716,404 +12713,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg7.nxv1f64.nxv2i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv2i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1f64_nxv2i8(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1f64.nxv2i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1f64_nxv2i8(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv2i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1f64.nxv8i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv8i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1f64_nxv8i32(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1f64.nxv8i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1f64_nxv8i32(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv8i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1f64.nxv32i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv32i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1f64_nxv32i8(<vscale x 1 x double> %val, double* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1f64.nxv32i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1f64_nxv32i8(<vscale x 1 x double> %val, double* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv32i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1f64.nxv16i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv16i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1f64_nxv16i32(<vscale x 1 x double> %val, double* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1f64.nxv16i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1f64_nxv16i32(<vscale x 1 x double> %val, double* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv16i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1f64.nxv2i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv2i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1f64_nxv2i16(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1f64.nxv2i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1f64_nxv2i16(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv2i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1f64.nxv2i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv2i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1f64_nxv2i64(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1f64.nxv2i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1f64_nxv2i64(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv2i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1f64.nxv16i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv16i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1f64_nxv16i16(<vscale x 1 x double> %val, double* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1f64.nxv16i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1f64_nxv16i16(<vscale x 1 x double> %val, double* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv16i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1f64.nxv32i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv32i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1f64_nxv32i16(<vscale x 1 x double> %val, double* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1f64.nxv32i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1f64_nxv32i16(<vscale x 1 x double> %val, double* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv32i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1f64.nxv4i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv4i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1f64_nxv4i32(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1f64.nxv4i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1f64_nxv4i32(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv4i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1f64.nxv16i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv16i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1f64_nxv16i8(<vscale x 1 x double> %val, double* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1f64.nxv16i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1f64_nxv16i8(<vscale x 1 x double> %val, double* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv16i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i64>, i64)
 declare void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
 
@@ -71196,88 +12795,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg8.nxv1f64.nxv8i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv8i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1f64_nxv8i16(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1f64.nxv8i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1f64_nxv8i16(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv8i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1f64.nxv4i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv4i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1f64_nxv4i8(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1f64.nxv4i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1f64_nxv4i8(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv4i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i16>, i64)
 declare void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
 
@@ -71319,252 +12836,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg8.nxv1f64.nxv2i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv2i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1f64_nxv2i32(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1f64.nxv2i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1f64_nxv2i32(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv2i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1f64.nxv8i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv8i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1f64_nxv8i8(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1f64.nxv8i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1f64_nxv8i8(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv8i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1f64.nxv4i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv4i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1f64_nxv4i64(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1f64.nxv4i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1f64_nxv4i64(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv4i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1f64.nxv64i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv64i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 64 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1f64_nxv64i8(<vscale x 1 x double> %val, double* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1f64.nxv64i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1f64_nxv64i8(<vscale x 1 x double> %val, double* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv64i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1f64.nxv4i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv4i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1f64_nxv4i16(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1f64.nxv4i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1f64_nxv4i16(<vscale x 1 x double> %val, double* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv4i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1f64.nxv8i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv8i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1f64_nxv8i64(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1f64.nxv8i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1f64_nxv8i64(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv8i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i8>, i64)
 declare void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)
 
@@ -71606,521 +12877,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg8.nxv1f64.nxv2i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv2i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1f64_nxv2i8(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1f64.nxv2i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1f64_nxv2i8(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv2i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1f64.nxv8i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv8i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1f64_nxv8i32(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1f64.nxv8i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1f64_nxv8i32(<vscale x 1 x double> %val, double* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv8i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1f64.nxv32i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv32i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1f64_nxv32i8(<vscale x 1 x double> %val, double* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1f64.nxv32i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1f64_nxv32i8(<vscale x 1 x double> %val, double* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv32i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1f64.nxv16i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv16i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1f64_nxv16i32(<vscale x 1 x double> %val, double* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1f64.nxv16i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1f64_nxv16i32(<vscale x 1 x double> %val, double* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv16i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1f64.nxv2i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv2i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1f64_nxv2i16(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1f64.nxv2i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1f64_nxv2i16(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv2i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1f64.nxv2i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv2i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1f64_nxv2i64(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1f64.nxv2i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1f64_nxv2i64(<vscale x 1 x double> %val, double* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv2i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2f32.nxv16i16(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv16i16(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2f32_nxv16i16(<vscale x 2 x float> %val, float* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2f32.nxv16i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2f32_nxv16i16(<vscale x 2 x float> %val, float* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv16i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2f32.nxv32i16(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv32i16(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 32 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2f32_nxv32i16(<vscale x 2 x float> %val, float* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2f32.nxv32i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2f32_nxv32i16(<vscale x 2 x float> %val, float* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv32i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2f32.nxv4i32(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv4i32(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2f32_nxv4i32(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2f32.nxv4i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2f32_nxv4i32(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv4i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2f32.nxv16i8(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv16i8(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2f32_nxv16i8(<vscale x 2 x float> %val, float* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2f32.nxv16i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2f32_nxv16i8(<vscale x 2 x float> %val, float* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv16i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2f32.nxv1i64(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv1i64(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2f32_nxv1i64(<vscale x 2 x float> %val, float* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2f32.nxv1i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2f32_nxv1i64(<vscale x 2 x float> %val, float* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv1i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2f32.nxv1i32(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv1i32(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2f32_nxv1i32(<vscale x 2 x float> %val, float* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2f32.nxv1i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2f32_nxv1i32(<vscale x 2 x float> %val, float* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv1i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2f32.nxv8i16(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv8i16(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2f32_nxv8i16(<vscale x 2 x float> %val, float* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2f32.nxv8i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2f32_nxv8i16(<vscale x 2 x float> %val, float* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv8i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2f32.nxv4i8(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv4i8(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2f32_nxv4i8(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2f32.nxv4i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2f32_nxv4i8(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv4i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2f32.nxv1i16(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv1i16(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2f32_nxv1i16(<vscale x 2 x float> %val, float* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2f32.nxv1i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2f32_nxv1i16(<vscale x 2 x float> %val, float* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv1i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i32(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i32>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i32(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
 
@@ -72152,186 +12908,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv2f32.nxv8i8(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv8i8(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2f32_nxv8i8(<vscale x 2 x float> %val, float* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2f32.nxv8i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2f32_nxv8i8(<vscale x 2 x float> %val, float* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv8i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2f32.nxv4i64(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv4i64(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2f32_nxv4i64(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2f32.nxv4i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2f32_nxv4i64(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv4i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2f32.nxv64i8(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv64i8(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 64 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2f32_nxv64i8(<vscale x 2 x float> %val, float* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2f32.nxv64i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2f32_nxv64i8(<vscale x 2 x float> %val, float* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv64i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2f32.nxv4i16(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv4i16(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2f32_nxv4i16(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2f32.nxv4i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2f32_nxv4i16(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv4i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2f32.nxv8i64(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv8i64(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2f32_nxv8i64(<vscale x 2 x float> %val, float* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2f32.nxv8i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2f32_nxv8i64(<vscale x 2 x float> %val, float* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv8i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2f32.nxv1i8(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv1i8(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2f32_nxv1i8(<vscale x 2 x float> %val, float* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2f32.nxv1i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2f32_nxv1i8(<vscale x 2 x float> %val, float* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv1i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i8(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i8>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i8(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i8>, <vscale x 2 x i1>, i64)
 
@@ -72363,93 +12939,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv2f32.nxv8i32(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv8i32(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2f32_nxv8i32(<vscale x 2 x float> %val, float* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2f32.nxv8i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2f32_nxv8i32(<vscale x 2 x float> %val, float* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv8i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2f32.nxv32i8(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv32i8(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 32 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2f32_nxv32i8(<vscale x 2 x float> %val, float* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2f32.nxv32i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2f32_nxv32i8(<vscale x 2 x float> %val, float* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv32i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2f32.nxv16i32(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv16i32(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2f32_nxv16i32(<vscale x 2 x float> %val, float* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2f32.nxv16i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2f32_nxv16i32(<vscale x 2 x float> %val, float* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv16i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i16(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i16>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i16(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64)
 
@@ -72510,285 +12999,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv2f32.nxv16i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv16i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2f32_nxv16i16(<vscale x 2 x float> %val, float* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv16i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2f32_nxv16i16(<vscale x 2 x float> %val, float* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv16i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2f32.nxv32i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv32i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 32 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2f32_nxv32i16(<vscale x 2 x float> %val, float* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv32i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2f32_nxv32i16(<vscale x 2 x float> %val, float* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv32i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2f32.nxv4i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv4i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2f32_nxv4i32(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv4i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2f32_nxv4i32(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv4i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2f32.nxv16i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv16i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2f32_nxv16i8(<vscale x 2 x float> %val, float* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv16i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2f32_nxv16i8(<vscale x 2 x float> %val, float* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv16i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2f32.nxv1i64(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv1i64(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2f32_nxv1i64(<vscale x 2 x float> %val, float* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv1i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2f32_nxv1i64(<vscale x 2 x float> %val, float* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv1i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2f32.nxv1i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv1i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2f32_nxv1i32(<vscale x 2 x float> %val, float* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv1i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2f32_nxv1i32(<vscale x 2 x float> %val, float* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv1i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2f32.nxv8i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv8i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2f32_nxv8i16(<vscale x 2 x float> %val, float* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv8i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2f32_nxv8i16(<vscale x 2 x float> %val, float* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv8i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2f32.nxv4i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv4i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2f32_nxv4i8(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv4i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2f32_nxv4i8(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv4i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2f32.nxv1i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv1i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2f32_nxv1i16(<vscale x 2 x float> %val, float* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv1i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2f32_nxv1i16(<vscale x 2 x float> %val, float* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv1i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i32>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
 
@@ -72820,192 +13030,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv2f32.nxv8i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv8i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2f32_nxv8i8(<vscale x 2 x float> %val, float* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv8i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2f32_nxv8i8(<vscale x 2 x float> %val, float* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv8i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2f32.nxv4i64(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv4i64(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2f32_nxv4i64(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv4i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2f32_nxv4i64(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv4i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2f32.nxv64i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv64i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 64 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2f32_nxv64i8(<vscale x 2 x float> %val, float* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv64i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2f32_nxv64i8(<vscale x 2 x float> %val, float* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv64i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2f32.nxv4i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv4i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2f32_nxv4i16(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv4i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2f32_nxv4i16(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv4i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2f32.nxv8i64(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv8i64(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2f32_nxv8i64(<vscale x 2 x float> %val, float* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv8i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2f32_nxv8i64(<vscale x 2 x float> %val, float* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv8i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2f32.nxv1i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv1i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2f32_nxv1i8(<vscale x 2 x float> %val, float* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv1i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2f32_nxv1i8(<vscale x 2 x float> %val, float* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv1i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i8>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i8>, <vscale x 2 x i1>, i64)
 
@@ -73037,99 +13061,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv2f32.nxv8i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv8i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2f32_nxv8i32(<vscale x 2 x float> %val, float* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv8i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2f32_nxv8i32(<vscale x 2 x float> %val, float* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv8i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2f32.nxv32i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv32i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 32 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2f32_nxv32i8(<vscale x 2 x float> %val, float* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv32i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2f32_nxv32i8(<vscale x 2 x float> %val, float* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv32i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2f32.nxv16i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv16i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2f32_nxv16i32(<vscale x 2 x float> %val, float* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv16i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2f32_nxv16i32(<vscale x 2 x float> %val, float* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv16i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i16>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64)
 
@@ -73192,303 +13123,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv2f32.nxv16i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv16i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2f32_nxv16i16(<vscale x 2 x float> %val, float* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2f32.nxv16i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2f32_nxv16i16(<vscale x 2 x float> %val, float* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv16i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2f32.nxv32i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv32i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 32 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2f32_nxv32i16(<vscale x 2 x float> %val, float* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2f32.nxv32i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2f32_nxv32i16(<vscale x 2 x float> %val, float* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv32i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2f32.nxv4i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv4i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2f32_nxv4i32(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2f32.nxv4i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2f32_nxv4i32(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv4i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2f32.nxv16i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv16i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2f32_nxv16i8(<vscale x 2 x float> %val, float* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2f32.nxv16i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2f32_nxv16i8(<vscale x 2 x float> %val, float* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv16i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2f32.nxv1i64(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv1i64(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2f32_nxv1i64(<vscale x 2 x float> %val, float* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2f32.nxv1i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2f32_nxv1i64(<vscale x 2 x float> %val, float* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv1i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2f32.nxv1i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv1i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2f32_nxv1i32(<vscale x 2 x float> %val, float* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2f32.nxv1i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2f32_nxv1i32(<vscale x 2 x float> %val, float* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv1i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2f32.nxv8i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv8i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2f32_nxv8i16(<vscale x 2 x float> %val, float* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2f32.nxv8i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2f32_nxv8i16(<vscale x 2 x float> %val, float* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv8i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2f32.nxv4i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv4i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2f32_nxv4i8(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2f32.nxv4i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2f32_nxv4i8(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv4i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2f32.nxv1i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv1i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2f32_nxv1i16(<vscale x 2 x float> %val, float* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2f32.nxv1i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2f32_nxv1i16(<vscale x 2 x float> %val, float* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv1i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i32>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
 
@@ -73522,204 +13156,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv2f32.nxv8i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv8i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2f32_nxv8i8(<vscale x 2 x float> %val, float* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2f32.nxv8i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2f32_nxv8i8(<vscale x 2 x float> %val, float* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv8i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2f32.nxv4i64(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv4i64(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2f32_nxv4i64(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2f32.nxv4i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2f32_nxv4i64(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv4i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2f32.nxv64i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv64i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 64 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2f32_nxv64i8(<vscale x 2 x float> %val, float* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2f32.nxv64i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2f32_nxv64i8(<vscale x 2 x float> %val, float* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv64i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2f32.nxv4i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv4i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2f32_nxv4i16(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2f32.nxv4i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2f32_nxv4i16(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv4i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2f32.nxv8i64(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv8i64(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2f32_nxv8i64(<vscale x 2 x float> %val, float* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2f32.nxv8i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2f32_nxv8i64(<vscale x 2 x float> %val, float* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv8i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2f32.nxv1i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv1i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2f32_nxv1i8(<vscale x 2 x float> %val, float* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2f32.nxv1i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2f32_nxv1i8(<vscale x 2 x float> %val, float* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv1i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i8>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i8>, <vscale x 2 x i1>, i64)
 
@@ -73753,105 +13189,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv2f32.nxv8i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv8i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2f32_nxv8i32(<vscale x 2 x float> %val, float* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2f32.nxv8i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2f32_nxv8i32(<vscale x 2 x float> %val, float* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv8i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2f32.nxv32i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv32i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 32 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2f32_nxv32i8(<vscale x 2 x float> %val, float* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2f32.nxv32i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2f32_nxv32i8(<vscale x 2 x float> %val, float* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv32i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2f32.nxv16i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv16i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2f32_nxv16i32(<vscale x 2 x float> %val, float* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2f32.nxv16i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2f32_nxv16i32(<vscale x 2 x float> %val, float* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv16i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i16>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64)
 
@@ -73918,321 +13255,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg5.nxv2f32.nxv16i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv16i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2f32_nxv16i16(<vscale x 2 x float> %val, float* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2f32.nxv16i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2f32_nxv16i16(<vscale x 2 x float> %val, float* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv16i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2f32.nxv32i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv32i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 32 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2f32_nxv32i16(<vscale x 2 x float> %val, float* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2f32.nxv32i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2f32_nxv32i16(<vscale x 2 x float> %val, float* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv32i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2f32.nxv4i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv4i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2f32_nxv4i32(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2f32.nxv4i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2f32_nxv4i32(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv4i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2f32.nxv16i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv16i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2f32_nxv16i8(<vscale x 2 x float> %val, float* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2f32.nxv16i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2f32_nxv16i8(<vscale x 2 x float> %val, float* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv16i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2f32.nxv1i64(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv1i64(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2f32_nxv1i64(<vscale x 2 x float> %val, float* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2f32.nxv1i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2f32_nxv1i64(<vscale x 2 x float> %val, float* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv1i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2f32.nxv1i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv1i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2f32_nxv1i32(<vscale x 2 x float> %val, float* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2f32.nxv1i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2f32_nxv1i32(<vscale x 2 x float> %val, float* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv1i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2f32.nxv8i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv8i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2f32_nxv8i16(<vscale x 2 x float> %val, float* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2f32.nxv8i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2f32_nxv8i16(<vscale x 2 x float> %val, float* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv8i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2f32.nxv4i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv4i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2f32_nxv4i8(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2f32.nxv4i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2f32_nxv4i8(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv4i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2f32.nxv1i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv1i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2f32_nxv1i16(<vscale x 2 x float> %val, float* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2f32.nxv1i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2f32_nxv1i16(<vscale x 2 x float> %val, float* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv1i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i32>, i64)
 declare void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
 
@@ -74268,216 +13290,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg5.nxv2f32.nxv8i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv8i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2f32_nxv8i8(<vscale x 2 x float> %val, float* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2f32.nxv8i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2f32_nxv8i8(<vscale x 2 x float> %val, float* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv8i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2f32.nxv4i64(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv4i64(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2f32_nxv4i64(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2f32.nxv4i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2f32_nxv4i64(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv4i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2f32.nxv64i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv64i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 64 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2f32_nxv64i8(<vscale x 2 x float> %val, float* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2f32.nxv64i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2f32_nxv64i8(<vscale x 2 x float> %val, float* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv64i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2f32.nxv4i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv4i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2f32_nxv4i16(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2f32.nxv4i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2f32_nxv4i16(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv4i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2f32.nxv8i64(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv8i64(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2f32_nxv8i64(<vscale x 2 x float> %val, float* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2f32.nxv8i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2f32_nxv8i64(<vscale x 2 x float> %val, float* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv8i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2f32.nxv1i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv1i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2f32_nxv1i8(<vscale x 2 x float> %val, float* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2f32.nxv1i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2f32_nxv1i8(<vscale x 2 x float> %val, float* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv1i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i8>, i64)
 declare void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i8>, <vscale x 2 x i1>, i64)
 
@@ -74513,111 +13325,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg5.nxv2f32.nxv8i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv8i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2f32_nxv8i32(<vscale x 2 x float> %val, float* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2f32.nxv8i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2f32_nxv8i32(<vscale x 2 x float> %val, float* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv8i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2f32.nxv32i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv32i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 32 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2f32_nxv32i8(<vscale x 2 x float> %val, float* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2f32.nxv32i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2f32_nxv32i8(<vscale x 2 x float> %val, float* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv32i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2f32.nxv16i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv16i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2f32_nxv16i32(<vscale x 2 x float> %val, float* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2f32.nxv16i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2f32_nxv16i32(<vscale x 2 x float> %val, float* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv16i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i16>, i64)
 declare void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64)
 
@@ -74688,339 +13395,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg6.nxv2f32.nxv16i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv16i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2f32_nxv16i16(<vscale x 2 x float> %val, float* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2f32.nxv16i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2f32_nxv16i16(<vscale x 2 x float> %val, float* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv16i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2f32.nxv32i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv32i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 32 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2f32_nxv32i16(<vscale x 2 x float> %val, float* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2f32.nxv32i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2f32_nxv32i16(<vscale x 2 x float> %val, float* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv32i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2f32.nxv4i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv4i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2f32_nxv4i32(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2f32.nxv4i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2f32_nxv4i32(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv4i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2f32.nxv16i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv16i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2f32_nxv16i8(<vscale x 2 x float> %val, float* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2f32.nxv16i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2f32_nxv16i8(<vscale x 2 x float> %val, float* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv16i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2f32.nxv1i64(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv1i64(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2f32_nxv1i64(<vscale x 2 x float> %val, float* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2f32.nxv1i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2f32_nxv1i64(<vscale x 2 x float> %val, float* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv1i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2f32.nxv1i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv1i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2f32_nxv1i32(<vscale x 2 x float> %val, float* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2f32.nxv1i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2f32_nxv1i32(<vscale x 2 x float> %val, float* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv1i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2f32.nxv8i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv8i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2f32_nxv8i16(<vscale x 2 x float> %val, float* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2f32.nxv8i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2f32_nxv8i16(<vscale x 2 x float> %val, float* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv8i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2f32.nxv4i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv4i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2f32_nxv4i8(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2f32.nxv4i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2f32_nxv4i8(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv4i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2f32.nxv1i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv1i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2f32_nxv1i16(<vscale x 2 x float> %val, float* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2f32.nxv1i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2f32_nxv1i16(<vscale x 2 x float> %val, float* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv1i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i32>, i64)
 declare void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
 
@@ -75058,228 +13432,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg6.nxv2f32.nxv8i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv8i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2f32_nxv8i8(<vscale x 2 x float> %val, float* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2f32.nxv8i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2f32_nxv8i8(<vscale x 2 x float> %val, float* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv8i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2f32.nxv4i64(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv4i64(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2f32_nxv4i64(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2f32.nxv4i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2f32_nxv4i64(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv4i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2f32.nxv64i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv64i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 64 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2f32_nxv64i8(<vscale x 2 x float> %val, float* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2f32.nxv64i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2f32_nxv64i8(<vscale x 2 x float> %val, float* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv64i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2f32.nxv4i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv4i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2f32_nxv4i16(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2f32.nxv4i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2f32_nxv4i16(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv4i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2f32.nxv8i64(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv8i64(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2f32_nxv8i64(<vscale x 2 x float> %val, float* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2f32.nxv8i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2f32_nxv8i64(<vscale x 2 x float> %val, float* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv8i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2f32.nxv1i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv1i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2f32_nxv1i8(<vscale x 2 x float> %val, float* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2f32.nxv1i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2f32_nxv1i8(<vscale x 2 x float> %val, float* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv1i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i8>, i64)
 declare void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i8>, <vscale x 2 x i1>, i64)
 
@@ -75317,117 +13469,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg6.nxv2f32.nxv8i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv8i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2f32_nxv8i32(<vscale x 2 x float> %val, float* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2f32.nxv8i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2f32_nxv8i32(<vscale x 2 x float> %val, float* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv8i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2f32.nxv32i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv32i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 32 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2f32_nxv32i8(<vscale x 2 x float> %val, float* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2f32.nxv32i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2f32_nxv32i8(<vscale x 2 x float> %val, float* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv32i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2f32.nxv16i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv16i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2f32_nxv16i32(<vscale x 2 x float> %val, float* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2f32.nxv16i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2f32_nxv16i32(<vscale x 2 x float> %val, float* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv16i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i16>, i64)
 declare void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64)
 
@@ -75502,357 +13543,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg7.nxv2f32.nxv16i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv16i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2f32_nxv16i16(<vscale x 2 x float> %val, float* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2f32.nxv16i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2f32_nxv16i16(<vscale x 2 x float> %val, float* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv16i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2f32.nxv32i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv32i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 32 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2f32_nxv32i16(<vscale x 2 x float> %val, float* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2f32.nxv32i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2f32_nxv32i16(<vscale x 2 x float> %val, float* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv32i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2f32.nxv4i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv4i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2f32_nxv4i32(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2f32.nxv4i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2f32_nxv4i32(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv4i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2f32.nxv16i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv16i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2f32_nxv16i8(<vscale x 2 x float> %val, float* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2f32.nxv16i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2f32_nxv16i8(<vscale x 2 x float> %val, float* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv16i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2f32.nxv1i64(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv1i64(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2f32_nxv1i64(<vscale x 2 x float> %val, float* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2f32.nxv1i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2f32_nxv1i64(<vscale x 2 x float> %val, float* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv1i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2f32.nxv1i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv1i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2f32_nxv1i32(<vscale x 2 x float> %val, float* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2f32.nxv1i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2f32_nxv1i32(<vscale x 2 x float> %val, float* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv1i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2f32.nxv8i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv8i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2f32_nxv8i16(<vscale x 2 x float> %val, float* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2f32.nxv8i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2f32_nxv8i16(<vscale x 2 x float> %val, float* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv8i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2f32.nxv4i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv4i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2f32_nxv4i8(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2f32.nxv4i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2f32_nxv4i8(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv4i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2f32.nxv1i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv1i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2f32_nxv1i16(<vscale x 2 x float> %val, float* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2f32.nxv1i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2f32_nxv1i16(<vscale x 2 x float> %val, float* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv1i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i32>, i64)
 declare void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
 
@@ -75892,240 +13582,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg7.nxv2f32.nxv8i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv8i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2f32_nxv8i8(<vscale x 2 x float> %val, float* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2f32.nxv8i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2f32_nxv8i8(<vscale x 2 x float> %val, float* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv8i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2f32.nxv4i64(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv4i64(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2f32_nxv4i64(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2f32.nxv4i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2f32_nxv4i64(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv4i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2f32.nxv64i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv64i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 64 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2f32_nxv64i8(<vscale x 2 x float> %val, float* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2f32.nxv64i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2f32_nxv64i8(<vscale x 2 x float> %val, float* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv64i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2f32.nxv4i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv4i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2f32_nxv4i16(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2f32.nxv4i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2f32_nxv4i16(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv4i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2f32.nxv8i64(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv8i64(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2f32_nxv8i64(<vscale x 2 x float> %val, float* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2f32.nxv8i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2f32_nxv8i64(<vscale x 2 x float> %val, float* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv8i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2f32.nxv1i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv1i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2f32_nxv1i8(<vscale x 2 x float> %val, float* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2f32.nxv1i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2f32_nxv1i8(<vscale x 2 x float> %val, float* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv1i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i8>, i64)
 declare void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i8>, <vscale x 2 x i1>, i64)
 
@@ -76165,123 +13621,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg7.nxv2f32.nxv8i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv8i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2f32_nxv8i32(<vscale x 2 x float> %val, float* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2f32.nxv8i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2f32_nxv8i32(<vscale x 2 x float> %val, float* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv8i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2f32.nxv32i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv32i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 32 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2f32_nxv32i8(<vscale x 2 x float> %val, float* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2f32.nxv32i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2f32_nxv32i8(<vscale x 2 x float> %val, float* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv32i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2f32.nxv16i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv16i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2f32_nxv16i32(<vscale x 2 x float> %val, float* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2f32.nxv16i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2f32_nxv16i32(<vscale x 2 x float> %val, float* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv16i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i16>, i64)
 declare void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64)
 
@@ -76360,375 +13699,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg8.nxv2f32.nxv16i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv16i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2f32_nxv16i16(<vscale x 2 x float> %val, float* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2f32.nxv16i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2f32_nxv16i16(<vscale x 2 x float> %val, float* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv16i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2f32.nxv32i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv32i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 32 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2f32_nxv32i16(<vscale x 2 x float> %val, float* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2f32.nxv32i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2f32_nxv32i16(<vscale x 2 x float> %val, float* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv32i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2f32.nxv4i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv4i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2f32_nxv4i32(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2f32.nxv4i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2f32_nxv4i32(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv4i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2f32.nxv16i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv16i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2f32_nxv16i8(<vscale x 2 x float> %val, float* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2f32.nxv16i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2f32_nxv16i8(<vscale x 2 x float> %val, float* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv16i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2f32.nxv1i64(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv1i64(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2f32_nxv1i64(<vscale x 2 x float> %val, float* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2f32.nxv1i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2f32_nxv1i64(<vscale x 2 x float> %val, float* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv1i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2f32.nxv1i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv1i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2f32_nxv1i32(<vscale x 2 x float> %val, float* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2f32.nxv1i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2f32_nxv1i32(<vscale x 2 x float> %val, float* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv1i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2f32.nxv8i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv8i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2f32_nxv8i16(<vscale x 2 x float> %val, float* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2f32.nxv8i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2f32_nxv8i16(<vscale x 2 x float> %val, float* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv8i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2f32.nxv4i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv4i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2f32_nxv4i8(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2f32.nxv4i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2f32_nxv4i8(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv4i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2f32.nxv1i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv1i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2f32_nxv1i16(<vscale x 2 x float> %val, float* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2f32.nxv1i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2f32_nxv1i16(<vscale x 2 x float> %val, float* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv1i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i32>, i64)
 declare void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
 
@@ -76770,252 +13740,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg8.nxv2f32.nxv8i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv8i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2f32_nxv8i8(<vscale x 2 x float> %val, float* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2f32.nxv8i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2f32_nxv8i8(<vscale x 2 x float> %val, float* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv8i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2f32.nxv4i64(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv4i64(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2f32_nxv4i64(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2f32.nxv4i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2f32_nxv4i64(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv4i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2f32.nxv64i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv64i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 64 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2f32_nxv64i8(<vscale x 2 x float> %val, float* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2f32.nxv64i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2f32_nxv64i8(<vscale x 2 x float> %val, float* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv64i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2f32.nxv4i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv4i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2f32_nxv4i16(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2f32.nxv4i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2f32_nxv4i16(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv4i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2f32.nxv8i64(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv8i64(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2f32_nxv8i64(<vscale x 2 x float> %val, float* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2f32.nxv8i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2f32_nxv8i64(<vscale x 2 x float> %val, float* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv8i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2f32.nxv1i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv1i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2f32_nxv1i8(<vscale x 2 x float> %val, float* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2f32.nxv1i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2f32_nxv1i8(<vscale x 2 x float> %val, float* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv1i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i8>, i64)
 declare void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i8>, <vscale x 2 x i1>, i64)
 
@@ -77057,129 +13781,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg8.nxv2f32.nxv8i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv8i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2f32_nxv8i32(<vscale x 2 x float> %val, float* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2f32.nxv8i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2f32_nxv8i32(<vscale x 2 x float> %val, float* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv8i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2f32.nxv32i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv32i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 32 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2f32_nxv32i8(<vscale x 2 x float> %val, float* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2f32.nxv32i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2f32_nxv32i8(<vscale x 2 x float> %val, float* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv32i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2f32.nxv16i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv16i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2f32_nxv16i32(<vscale x 2 x float> %val, float* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2f32.nxv16i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2f32_nxv16i32(<vscale x 2 x float> %val, float* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv16i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i16>, i64)
 declare void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64)
 
@@ -77262,122 +13863,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv1f16.nxv16i16(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv16i16(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1f16_nxv16i16(<vscale x 1 x half> %val, half* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1f16.nxv16i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1f16_nxv16i16(<vscale x 1 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv16i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1f16.nxv32i16(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv32i16(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1f16_nxv32i16(<vscale x 1 x half> %val, half* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1f16.nxv32i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1f16_nxv32i16(<vscale x 1 x half> %val, half* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv32i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1f16.nxv4i32(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv4i32(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1f16_nxv4i32(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1f16.nxv4i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1f16_nxv4i32(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv4i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1f16.nxv16i8(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv16i8(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1f16_nxv16i8(<vscale x 1 x half> %val, half* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1f16.nxv16i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1f16_nxv16i8(<vscale x 1 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv16i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i64(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i64>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i64(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
 
@@ -77440,66 +13925,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv1f16.nxv8i16(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv8i16(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1f16_nxv8i16(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1f16.nxv8i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1f16_nxv8i16(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv8i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1f16.nxv4i8(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv4i8(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1f16_nxv4i8(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1f16.nxv4i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1f16_nxv4i8(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv4i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i16(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i16>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i16(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
 
@@ -77531,186 +13956,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv1f16.nxv2i32(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv2i32(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1f16_nxv2i32(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1f16.nxv2i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1f16_nxv2i32(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv2i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1f16.nxv8i8(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv8i8(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1f16_nxv8i8(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1f16.nxv8i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1f16_nxv8i8(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv8i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1f16.nxv4i64(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv4i64(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1f16_nxv4i64(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1f16.nxv4i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1f16_nxv4i64(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv4i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1f16.nxv64i8(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv64i8(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 64 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1f16_nxv64i8(<vscale x 1 x half> %val, half* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1f16.nxv64i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1f16_nxv64i8(<vscale x 1 x half> %val, half* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv64i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1f16.nxv4i16(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv4i16(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1f16_nxv4i16(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1f16.nxv4i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1f16_nxv4i16(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv4i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1f16.nxv8i64(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv8i64(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1f16_nxv8i64(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1f16.nxv8i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1f16_nxv8i64(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv8i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i8(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i8>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i8(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)
 
@@ -77742,308 +13987,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv1f16.nxv2i8(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv2i8(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1f16_nxv2i8(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1f16.nxv2i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1f16_nxv2i8(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv2i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1f16.nxv8i32(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv8i32(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1f16_nxv8i32(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1f16.nxv8i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1f16_nxv8i32(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv8i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1f16.nxv32i8(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv32i8(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1f16_nxv32i8(<vscale x 1 x half> %val, half* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1f16.nxv32i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1f16_nxv32i8(<vscale x 1 x half> %val, half* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv32i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1f16.nxv16i32(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv16i32(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1f16_nxv16i32(<vscale x 1 x half> %val, half* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1f16.nxv16i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1f16_nxv16i32(<vscale x 1 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv16i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1f16.nxv2i16(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv2i16(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1f16_nxv2i16(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1f16.nxv2i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1f16_nxv2i16(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv2i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1f16.nxv2i64(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv2i64(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1f16_nxv2i64(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1f16.nxv2i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1f16_nxv2i64(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv2i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1f16.nxv16i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv16i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1f16_nxv16i16(<vscale x 1 x half> %val, half* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1f16.nxv16i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1f16_nxv16i16(<vscale x 1 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv16i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1f16.nxv32i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv32i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1f16_nxv32i16(<vscale x 1 x half> %val, half* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1f16.nxv32i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1f16_nxv32i16(<vscale x 1 x half> %val, half* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv32i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1f16.nxv4i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv4i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1f16_nxv4i32(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1f16.nxv4i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1f16_nxv4i32(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv4i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1f16.nxv16i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv16i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1f16_nxv16i8(<vscale x 1 x half> %val, half* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1f16.nxv16i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1f16_nxv16i8(<vscale x 1 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv16i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i64>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
 
@@ -78106,68 +14049,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv1f16.nxv8i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv8i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1f16_nxv8i16(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1f16.nxv8i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1f16_nxv8i16(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv8i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1f16.nxv4i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv4i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1f16_nxv4i8(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1f16.nxv4i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1f16_nxv4i8(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv4i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i16>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
 
@@ -78199,192 +14080,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv1f16.nxv2i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv2i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1f16_nxv2i32(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1f16.nxv2i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1f16_nxv2i32(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv2i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1f16.nxv8i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv8i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1f16_nxv8i8(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1f16.nxv8i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1f16_nxv8i8(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv8i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1f16.nxv4i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv4i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1f16_nxv4i64(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1f16.nxv4i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1f16_nxv4i64(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv4i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1f16.nxv64i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv64i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 64 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1f16_nxv64i8(<vscale x 1 x half> %val, half* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1f16.nxv64i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1f16_nxv64i8(<vscale x 1 x half> %val, half* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv64i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1f16.nxv4i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv4i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1f16_nxv4i16(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1f16.nxv4i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1f16_nxv4i16(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv4i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1f16.nxv8i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv8i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1f16_nxv8i64(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1f16.nxv8i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1f16_nxv8i64(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv8i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i8>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)
 
@@ -78416,324 +14111,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv1f16.nxv2i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv2i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1f16_nxv2i8(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1f16.nxv2i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1f16_nxv2i8(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv2i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1f16.nxv8i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv8i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1f16_nxv8i32(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1f16.nxv8i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1f16_nxv8i32(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv8i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1f16.nxv32i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv32i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1f16_nxv32i8(<vscale x 1 x half> %val, half* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1f16.nxv32i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1f16_nxv32i8(<vscale x 1 x half> %val, half* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv32i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1f16.nxv16i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv16i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1f16_nxv16i32(<vscale x 1 x half> %val, half* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1f16.nxv16i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1f16_nxv16i32(<vscale x 1 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv16i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1f16.nxv2i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv2i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1f16_nxv2i16(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1f16.nxv2i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1f16_nxv2i16(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv2i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1f16.nxv2i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv2i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1f16_nxv2i64(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1f16.nxv2i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1f16_nxv2i64(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv2i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1f16.nxv16i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv16i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1f16_nxv16i16(<vscale x 1 x half> %val, half* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1f16.nxv16i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1f16_nxv16i16(<vscale x 1 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv16i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1f16.nxv32i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv32i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1f16_nxv32i16(<vscale x 1 x half> %val, half* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1f16.nxv32i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1f16_nxv32i16(<vscale x 1 x half> %val, half* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv32i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1f16.nxv4i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv4i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1f16_nxv4i32(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1f16.nxv4i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1f16_nxv4i32(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv4i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1f16.nxv16i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv16i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1f16_nxv16i8(<vscale x 1 x half> %val, half* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1f16.nxv16i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1f16_nxv16i8(<vscale x 1 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv16i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i64>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
 
@@ -78800,72 +14177,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv1f16.nxv8i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv8i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1f16_nxv8i16(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1f16.nxv8i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1f16_nxv8i16(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv8i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1f16.nxv4i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv4i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1f16_nxv4i8(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1f16.nxv4i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1f16_nxv4i8(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv4i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i16>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
 
@@ -78899,204 +14210,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv1f16.nxv2i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv2i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1f16_nxv2i32(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1f16.nxv2i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1f16_nxv2i32(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv2i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1f16.nxv8i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv8i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1f16_nxv8i8(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1f16.nxv8i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1f16_nxv8i8(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv8i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1f16.nxv4i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv4i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1f16_nxv4i64(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1f16.nxv4i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1f16_nxv4i64(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv4i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1f16.nxv64i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv64i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 64 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1f16_nxv64i8(<vscale x 1 x half> %val, half* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1f16.nxv64i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1f16_nxv64i8(<vscale x 1 x half> %val, half* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv64i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1f16.nxv4i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv4i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1f16_nxv4i16(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1f16.nxv4i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1f16_nxv4i16(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv4i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1f16.nxv8i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv8i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1f16_nxv8i64(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1f16.nxv8i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1f16_nxv8i64(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv8i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i8>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)
 
@@ -79130,344 +14243,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv1f16.nxv2i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv2i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1f16_nxv2i8(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1f16.nxv2i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1f16_nxv2i8(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv2i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1f16.nxv8i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv8i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1f16_nxv8i32(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1f16.nxv8i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1f16_nxv8i32(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv8i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1f16.nxv32i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv32i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1f16_nxv32i8(<vscale x 1 x half> %val, half* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1f16.nxv32i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1f16_nxv32i8(<vscale x 1 x half> %val, half* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv32i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1f16.nxv16i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv16i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1f16_nxv16i32(<vscale x 1 x half> %val, half* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1f16.nxv16i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1f16_nxv16i32(<vscale x 1 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv16i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1f16.nxv2i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv2i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1f16_nxv2i16(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1f16.nxv2i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1f16_nxv2i16(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv2i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1f16.nxv2i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv2i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1f16_nxv2i64(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1f16.nxv2i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1f16_nxv2i64(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv2i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1f16.nxv16i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv16i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1f16_nxv16i16(<vscale x 1 x half> %val, half* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1f16.nxv16i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1f16_nxv16i16(<vscale x 1 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv16i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1f16.nxv32i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv32i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1f16_nxv32i16(<vscale x 1 x half> %val, half* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1f16.nxv32i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1f16_nxv32i16(<vscale x 1 x half> %val, half* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv32i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1f16.nxv4i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv4i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1f16_nxv4i32(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1f16.nxv4i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1f16_nxv4i32(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv4i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1f16.nxv16i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv16i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1f16_nxv16i8(<vscale x 1 x half> %val, half* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1f16.nxv16i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1f16_nxv16i8(<vscale x 1 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv16i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i64>, i64)
 declare void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
 
@@ -79538,76 +14313,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg5.nxv1f16.nxv8i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv8i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1f16_nxv8i16(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1f16.nxv8i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1f16_nxv8i16(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv8i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1f16.nxv4i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv4i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1f16_nxv4i8(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1f16.nxv4i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1f16_nxv4i8(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv4i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i16>, i64)
 declare void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
 
@@ -79643,216 +14348,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg5.nxv1f16.nxv2i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv2i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1f16_nxv2i32(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1f16.nxv2i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1f16_nxv2i32(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv2i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1f16.nxv8i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv8i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1f16_nxv8i8(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1f16.nxv8i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1f16_nxv8i8(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv8i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1f16.nxv4i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv4i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1f16_nxv4i64(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1f16.nxv4i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1f16_nxv4i64(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv4i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1f16.nxv64i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv64i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 64 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1f16_nxv64i8(<vscale x 1 x half> %val, half* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1f16.nxv64i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1f16_nxv64i8(<vscale x 1 x half> %val, half* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv64i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1f16.nxv4i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv4i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1f16_nxv4i16(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1f16.nxv4i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1f16_nxv4i16(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv4i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1f16.nxv8i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv8i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1f16_nxv8i64(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1f16.nxv8i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1f16_nxv8i64(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv8i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i8>, i64)
 declare void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)
 
@@ -79888,364 +14383,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg5.nxv1f16.nxv2i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv2i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1f16_nxv2i8(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1f16.nxv2i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1f16_nxv2i8(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv2i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1f16.nxv8i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv8i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1f16_nxv8i32(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1f16.nxv8i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1f16_nxv8i32(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv8i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1f16.nxv32i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv32i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1f16_nxv32i8(<vscale x 1 x half> %val, half* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1f16.nxv32i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1f16_nxv32i8(<vscale x 1 x half> %val, half* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv32i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1f16.nxv16i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv16i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1f16_nxv16i32(<vscale x 1 x half> %val, half* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1f16.nxv16i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1f16_nxv16i32(<vscale x 1 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv16i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1f16.nxv2i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv2i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1f16_nxv2i16(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1f16.nxv2i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1f16_nxv2i16(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv2i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1f16.nxv2i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv2i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1f16_nxv2i64(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1f16.nxv2i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1f16_nxv2i64(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv2i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1f16.nxv16i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv16i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1f16_nxv16i16(<vscale x 1 x half> %val, half* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1f16.nxv16i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1f16_nxv16i16(<vscale x 1 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv16i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1f16.nxv32i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv32i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1f16_nxv32i16(<vscale x 1 x half> %val, half* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1f16.nxv32i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1f16_nxv32i16(<vscale x 1 x half> %val, half* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv32i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1f16.nxv4i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv4i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1f16_nxv4i32(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1f16.nxv4i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1f16_nxv4i32(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv4i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1f16.nxv16i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv16i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1f16_nxv16i8(<vscale x 1 x half> %val, half* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1f16.nxv16i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1f16_nxv16i8(<vscale x 1 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv16i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i64>, i64)
 declare void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
 
@@ -80320,80 +14457,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg6.nxv1f16.nxv8i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv8i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1f16_nxv8i16(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1f16.nxv8i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1f16_nxv8i16(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv8i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1f16.nxv4i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv4i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1f16_nxv4i8(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1f16.nxv4i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1f16_nxv4i8(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv4i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i16>, i64)
 declare void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
 
@@ -80431,228 +14494,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg6.nxv1f16.nxv2i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv2i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1f16_nxv2i32(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1f16.nxv2i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1f16_nxv2i32(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv2i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1f16.nxv8i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv8i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1f16_nxv8i8(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1f16.nxv8i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1f16_nxv8i8(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv8i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1f16.nxv4i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv4i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1f16_nxv4i64(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1f16.nxv4i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1f16_nxv4i64(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv4i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1f16.nxv64i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv64i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 64 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1f16_nxv64i8(<vscale x 1 x half> %val, half* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1f16.nxv64i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1f16_nxv64i8(<vscale x 1 x half> %val, half* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv64i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1f16.nxv4i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv4i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1f16_nxv4i16(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1f16.nxv4i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1f16_nxv4i16(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv4i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1f16.nxv8i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv8i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1f16_nxv8i64(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1f16.nxv8i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1f16_nxv8i64(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv8i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i8>, i64)
 declare void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)
 
@@ -80690,384 +14531,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg6.nxv1f16.nxv2i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv2i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1f16_nxv2i8(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1f16.nxv2i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1f16_nxv2i8(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv2i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1f16.nxv8i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv8i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1f16_nxv8i32(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1f16.nxv8i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1f16_nxv8i32(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv8i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1f16.nxv32i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv32i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1f16_nxv32i8(<vscale x 1 x half> %val, half* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1f16.nxv32i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1f16_nxv32i8(<vscale x 1 x half> %val, half* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv32i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1f16.nxv16i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv16i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1f16_nxv16i32(<vscale x 1 x half> %val, half* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1f16.nxv16i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1f16_nxv16i32(<vscale x 1 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv16i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1f16.nxv2i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv2i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1f16_nxv2i16(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1f16.nxv2i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1f16_nxv2i16(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv2i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1f16.nxv2i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv2i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1f16_nxv2i64(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1f16.nxv2i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1f16_nxv2i64(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv2i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1f16.nxv16i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv16i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1f16_nxv16i16(<vscale x 1 x half> %val, half* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1f16.nxv16i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1f16_nxv16i16(<vscale x 1 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv16i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1f16.nxv32i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv32i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1f16_nxv32i16(<vscale x 1 x half> %val, half* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1f16.nxv32i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1f16_nxv32i16(<vscale x 1 x half> %val, half* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv32i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1f16.nxv4i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv4i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1f16_nxv4i32(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1f16.nxv4i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1f16_nxv4i32(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv4i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1f16.nxv16i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv16i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1f16_nxv16i8(<vscale x 1 x half> %val, half* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1f16.nxv16i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1f16_nxv16i8(<vscale x 1 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv16i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i64>, i64)
 declare void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
 
@@ -81146,84 +14609,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg7.nxv1f16.nxv8i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv8i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1f16_nxv8i16(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1f16.nxv8i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1f16_nxv8i16(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv8i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1f16.nxv4i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv4i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1f16_nxv4i8(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1f16.nxv4i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1f16_nxv4i8(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv4i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i16>, i64)
 declare void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
 
@@ -81263,240 +14648,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg7.nxv1f16.nxv2i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv2i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1f16_nxv2i32(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1f16.nxv2i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1f16_nxv2i32(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv2i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1f16.nxv8i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv8i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1f16_nxv8i8(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1f16.nxv8i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1f16_nxv8i8(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv8i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1f16.nxv4i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv4i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1f16_nxv4i64(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1f16.nxv4i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1f16_nxv4i64(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv4i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1f16.nxv64i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv64i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 64 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1f16_nxv64i8(<vscale x 1 x half> %val, half* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1f16.nxv64i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1f16_nxv64i8(<vscale x 1 x half> %val, half* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv64i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1f16.nxv4i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv4i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1f16_nxv4i16(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1f16.nxv4i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1f16_nxv4i16(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv4i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1f16.nxv8i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv8i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1f16_nxv8i64(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1f16.nxv8i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1f16_nxv8i64(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv8i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i8>, i64)
 declare void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)
 
@@ -81536,404 +14687,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg7.nxv1f16.nxv2i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv2i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1f16_nxv2i8(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1f16.nxv2i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1f16_nxv2i8(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv2i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1f16.nxv8i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv8i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1f16_nxv8i32(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1f16.nxv8i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1f16_nxv8i32(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv8i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1f16.nxv32i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv32i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1f16_nxv32i8(<vscale x 1 x half> %val, half* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1f16.nxv32i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1f16_nxv32i8(<vscale x 1 x half> %val, half* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv32i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1f16.nxv16i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv16i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1f16_nxv16i32(<vscale x 1 x half> %val, half* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1f16.nxv16i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1f16_nxv16i32(<vscale x 1 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv16i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1f16.nxv2i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv2i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1f16_nxv2i16(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1f16.nxv2i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1f16_nxv2i16(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv2i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1f16.nxv2i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv2i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1f16_nxv2i64(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1f16.nxv2i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1f16_nxv2i64(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv2i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1f16.nxv16i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv16i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1f16_nxv16i16(<vscale x 1 x half> %val, half* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1f16.nxv16i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1f16_nxv16i16(<vscale x 1 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv16i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1f16.nxv32i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv32i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1f16_nxv32i16(<vscale x 1 x half> %val, half* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1f16.nxv32i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1f16_nxv32i16(<vscale x 1 x half> %val, half* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv32i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1f16.nxv4i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv4i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1f16_nxv4i32(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1f16.nxv4i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1f16_nxv4i32(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv4i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1f16.nxv16i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv16i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1f16_nxv16i8(<vscale x 1 x half> %val, half* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1f16.nxv16i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1f16_nxv16i8(<vscale x 1 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv16i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i64>, i64)
 declare void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
 
@@ -82016,88 +14769,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg8.nxv1f16.nxv8i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv8i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1f16_nxv8i16(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1f16.nxv8i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1f16_nxv8i16(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv8i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1f16.nxv4i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv4i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1f16_nxv4i8(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1f16.nxv4i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1f16_nxv4i8(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv4i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i16>, i64)
 declare void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
 
@@ -82139,252 +14810,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg8.nxv1f16.nxv2i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv2i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1f16_nxv2i32(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1f16.nxv2i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1f16_nxv2i32(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv2i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1f16.nxv8i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv8i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1f16_nxv8i8(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1f16.nxv8i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1f16_nxv8i8(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv8i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1f16.nxv4i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv4i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1f16_nxv4i64(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1f16.nxv4i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1f16_nxv4i64(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv4i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1f16.nxv64i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv64i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 64 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1f16_nxv64i8(<vscale x 1 x half> %val, half* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1f16.nxv64i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1f16_nxv64i8(<vscale x 1 x half> %val, half* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv64i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1f16.nxv4i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv4i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1f16_nxv4i16(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1f16.nxv4i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1f16_nxv4i16(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv4i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1f16.nxv8i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv8i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1f16_nxv8i64(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1f16.nxv8i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1f16_nxv8i64(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv8i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i8>, i64)
 declare void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)
 
@@ -82426,368 +14851,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg8.nxv1f16.nxv2i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv2i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1f16_nxv2i8(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1f16.nxv2i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1f16_nxv2i8(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv2i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1f16.nxv8i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv8i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1f16_nxv8i32(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1f16.nxv8i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1f16_nxv8i32(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv8i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1f16.nxv32i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv32i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1f16_nxv32i8(<vscale x 1 x half> %val, half* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1f16.nxv32i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1f16_nxv32i8(<vscale x 1 x half> %val, half* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv32i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1f16.nxv16i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv16i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1f16_nxv16i32(<vscale x 1 x half> %val, half* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1f16.nxv16i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1f16_nxv16i32(<vscale x 1 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv16i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1f16.nxv2i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv2i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1f16_nxv2i16(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1f16.nxv2i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1f16_nxv2i16(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv2i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1f16.nxv2i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv2i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1f16_nxv2i64(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1f16.nxv2i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1f16_nxv2i64(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv2i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1f32.nxv16i16(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv16i16(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1f32_nxv16i16(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1f32.nxv16i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1f32_nxv16i16(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv16i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1f32.nxv32i16(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv32i16(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1f32_nxv32i16(<vscale x 1 x float> %val, float* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1f32.nxv32i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1f32_nxv32i16(<vscale x 1 x float> %val, float* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv32i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1f32.nxv4i32(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv4i32(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1f32_nxv4i32(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1f32.nxv4i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1f32_nxv4i32(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv4i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1f32.nxv16i8(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv16i8(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1f32_nxv16i8(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1f32.nxv16i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1f32_nxv16i8(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv16i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i64(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i64>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i64(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
 
@@ -82850,66 +14913,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv1f32.nxv8i16(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv8i16(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1f32_nxv8i16(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1f32.nxv8i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1f32_nxv8i16(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv8i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1f32.nxv4i8(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv4i8(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1f32_nxv4i8(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1f32.nxv4i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1f32_nxv4i8(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv4i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i16>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
 
@@ -82941,186 +14944,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv1f32.nxv2i32(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv2i32(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1f32_nxv2i32(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1f32.nxv2i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1f32_nxv2i32(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv2i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1f32.nxv8i8(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv8i8(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1f32_nxv8i8(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1f32.nxv8i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1f32_nxv8i8(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv8i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1f32.nxv4i64(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv4i64(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1f32_nxv4i64(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1f32.nxv4i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1f32_nxv4i64(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv4i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1f32.nxv64i8(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv64i8(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 64 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1f32_nxv64i8(<vscale x 1 x float> %val, float* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1f32.nxv64i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1f32_nxv64i8(<vscale x 1 x float> %val, float* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv64i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1f32.nxv4i16(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv4i16(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1f32_nxv4i16(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1f32.nxv4i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1f32_nxv4i16(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv4i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1f32.nxv8i64(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv8i64(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1f32_nxv8i64(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1f32.nxv8i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1f32_nxv8i64(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv8i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i8>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)
 
@@ -83152,308 +14975,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv1f32.nxv2i8(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv2i8(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1f32_nxv2i8(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1f32.nxv2i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1f32_nxv2i8(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv2i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1f32.nxv8i32(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv8i32(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1f32_nxv8i32(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1f32.nxv8i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1f32_nxv8i32(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv8i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1f32.nxv32i8(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv32i8(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1f32_nxv32i8(<vscale x 1 x float> %val, float* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1f32.nxv32i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1f32_nxv32i8(<vscale x 1 x float> %val, float* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv32i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1f32.nxv16i32(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv16i32(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1f32_nxv16i32(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1f32.nxv16i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1f32_nxv16i32(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv16i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1f32.nxv2i16(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv2i16(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1f32_nxv2i16(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1f32.nxv2i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1f32_nxv2i16(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv2i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv1f32.nxv2i64(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv2i64(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg2_nxv1f32_nxv2i64(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv1f32.nxv2i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv1f32_nxv2i64(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv2i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1f32.nxv16i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv16i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1f32_nxv16i16(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1f32.nxv16i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1f32_nxv16i16(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv16i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1f32.nxv32i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv32i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1f32_nxv32i16(<vscale x 1 x float> %val, float* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1f32.nxv32i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1f32_nxv32i16(<vscale x 1 x float> %val, float* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv32i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1f32.nxv4i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv4i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1f32_nxv4i32(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1f32.nxv4i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1f32_nxv4i32(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv4i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1f32.nxv16i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv16i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1f32_nxv16i8(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1f32.nxv16i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1f32_nxv16i8(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv16i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i64>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
 
@@ -83516,68 +15037,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv1f32.nxv8i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv8i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1f32_nxv8i16(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1f32.nxv8i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1f32_nxv8i16(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv8i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1f32.nxv4i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv4i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1f32_nxv4i8(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1f32.nxv4i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1f32_nxv4i8(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv4i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i16>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
 
@@ -83609,192 +15068,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv1f32.nxv2i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv2i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1f32_nxv2i32(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1f32.nxv2i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1f32_nxv2i32(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv2i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1f32.nxv8i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv8i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1f32_nxv8i8(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1f32.nxv8i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1f32_nxv8i8(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv8i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1f32.nxv4i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv4i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1f32_nxv4i64(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1f32.nxv4i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1f32_nxv4i64(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv4i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1f32.nxv64i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv64i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 64 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1f32_nxv64i8(<vscale x 1 x float> %val, float* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1f32.nxv64i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1f32_nxv64i8(<vscale x 1 x float> %val, float* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv64i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1f32.nxv4i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv4i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1f32_nxv4i16(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1f32.nxv4i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1f32_nxv4i16(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv4i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1f32.nxv8i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv8i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1f32_nxv8i64(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1f32.nxv8i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1f32_nxv8i64(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv8i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i8>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)
 
@@ -83826,324 +15099,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv1f32.nxv2i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv2i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1f32_nxv2i8(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1f32.nxv2i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1f32_nxv2i8(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv2i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1f32.nxv8i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv8i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1f32_nxv8i32(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1f32.nxv8i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1f32_nxv8i32(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv8i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1f32.nxv32i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv32i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1f32_nxv32i8(<vscale x 1 x float> %val, float* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1f32.nxv32i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1f32_nxv32i8(<vscale x 1 x float> %val, float* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv32i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1f32.nxv16i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv16i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1f32_nxv16i32(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1f32.nxv16i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1f32_nxv16i32(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv16i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1f32.nxv2i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv2i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1f32_nxv2i16(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1f32.nxv2i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1f32_nxv2i16(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv2i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv1f32.nxv2i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv2i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg3_nxv1f32_nxv2i64(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv1f32.nxv2i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv1f32_nxv2i64(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv2i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1f32.nxv16i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv16i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1f32_nxv16i16(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1f32.nxv16i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1f32_nxv16i16(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv16i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1f32.nxv32i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv32i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1f32_nxv32i16(<vscale x 1 x float> %val, float* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1f32.nxv32i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1f32_nxv32i16(<vscale x 1 x float> %val, float* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv32i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1f32.nxv4i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv4i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1f32_nxv4i32(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1f32.nxv4i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1f32_nxv4i32(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv4i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1f32.nxv16i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv16i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1f32_nxv16i8(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1f32.nxv16i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1f32_nxv16i8(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv16i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i64>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
 
@@ -84210,72 +15165,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv1f32.nxv8i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv8i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1f32_nxv8i16(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1f32.nxv8i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1f32_nxv8i16(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv8i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1f32.nxv4i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv4i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1f32_nxv4i8(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1f32.nxv4i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1f32_nxv4i8(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv4i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i16>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
 
@@ -84309,204 +15198,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv1f32.nxv2i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv2i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1f32_nxv2i32(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1f32.nxv2i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1f32_nxv2i32(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv2i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1f32.nxv8i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv8i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1f32_nxv8i8(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1f32.nxv8i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1f32_nxv8i8(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv8i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1f32.nxv4i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv4i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1f32_nxv4i64(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1f32.nxv4i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1f32_nxv4i64(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv4i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1f32.nxv64i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv64i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 64 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1f32_nxv64i8(<vscale x 1 x float> %val, float* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1f32.nxv64i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1f32_nxv64i8(<vscale x 1 x float> %val, float* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv64i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1f32.nxv4i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv4i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1f32_nxv4i16(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1f32.nxv4i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1f32_nxv4i16(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv4i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1f32.nxv8i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv8i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1f32_nxv8i64(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1f32.nxv8i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1f32_nxv8i64(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv8i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i8>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)
 
@@ -84540,344 +15231,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv1f32.nxv2i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv2i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1f32_nxv2i8(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1f32.nxv2i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1f32_nxv2i8(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv2i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1f32.nxv8i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv8i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1f32_nxv8i32(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1f32.nxv8i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1f32_nxv8i32(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv8i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1f32.nxv32i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv32i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1f32_nxv32i8(<vscale x 1 x float> %val, float* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1f32.nxv32i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1f32_nxv32i8(<vscale x 1 x float> %val, float* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv32i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1f32.nxv16i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv16i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1f32_nxv16i32(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1f32.nxv16i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1f32_nxv16i32(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv16i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1f32.nxv2i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv2i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1f32_nxv2i16(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1f32.nxv2i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1f32_nxv2i16(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv2i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv1f32.nxv2i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv2i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg4_nxv1f32_nxv2i64(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv1f32.nxv2i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv1f32_nxv2i64(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv2i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1f32.nxv16i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv16i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1f32_nxv16i16(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1f32.nxv16i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1f32_nxv16i16(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv16i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1f32.nxv32i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv32i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1f32_nxv32i16(<vscale x 1 x float> %val, float* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1f32.nxv32i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1f32_nxv32i16(<vscale x 1 x float> %val, float* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv32i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1f32.nxv4i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv4i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1f32_nxv4i32(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1f32.nxv4i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1f32_nxv4i32(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv4i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1f32.nxv16i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv16i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1f32_nxv16i8(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1f32.nxv16i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1f32_nxv16i8(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv16i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i64>, i64)
 declare void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
 
@@ -84948,76 +15301,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg5.nxv1f32.nxv8i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv8i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1f32_nxv8i16(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1f32.nxv8i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1f32_nxv8i16(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv8i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1f32.nxv4i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv4i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1f32_nxv4i8(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1f32.nxv4i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1f32_nxv4i8(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv4i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i16>, i64)
 declare void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
 
@@ -85053,216 +15336,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg5.nxv1f32.nxv2i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv2i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1f32_nxv2i32(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1f32.nxv2i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1f32_nxv2i32(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv2i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1f32.nxv8i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv8i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1f32_nxv8i8(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1f32.nxv8i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1f32_nxv8i8(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv8i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1f32.nxv4i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv4i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1f32_nxv4i64(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1f32.nxv4i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1f32_nxv4i64(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv4i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1f32.nxv64i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv64i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 64 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1f32_nxv64i8(<vscale x 1 x float> %val, float* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1f32.nxv64i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1f32_nxv64i8(<vscale x 1 x float> %val, float* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv64i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1f32.nxv4i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv4i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1f32_nxv4i16(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1f32.nxv4i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1f32_nxv4i16(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv4i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1f32.nxv8i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv8i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1f32_nxv8i64(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1f32.nxv8i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1f32_nxv8i64(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv8i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i8>, i64)
 declare void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)
 
@@ -85298,364 +15371,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg5.nxv1f32.nxv2i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv2i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1f32_nxv2i8(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1f32.nxv2i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1f32_nxv2i8(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv2i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1f32.nxv8i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv8i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1f32_nxv8i32(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1f32.nxv8i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1f32_nxv8i32(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv8i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1f32.nxv32i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv32i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1f32_nxv32i8(<vscale x 1 x float> %val, float* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1f32.nxv32i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1f32_nxv32i8(<vscale x 1 x float> %val, float* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv32i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1f32.nxv16i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv16i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1f32_nxv16i32(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1f32.nxv16i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1f32_nxv16i32(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv16i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1f32.nxv2i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv2i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1f32_nxv2i16(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1f32.nxv2i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1f32_nxv2i16(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv2i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv1f32.nxv2i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv2i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg5_nxv1f32_nxv2i64(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv1f32.nxv2i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv1f32_nxv2i64(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv2i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1f32.nxv16i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv16i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1f32_nxv16i16(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1f32.nxv16i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1f32_nxv16i16(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv16i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1f32.nxv32i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv32i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1f32_nxv32i16(<vscale x 1 x float> %val, float* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1f32.nxv32i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1f32_nxv32i16(<vscale x 1 x float> %val, float* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv32i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1f32.nxv4i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv4i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1f32_nxv4i32(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1f32.nxv4i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1f32_nxv4i32(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv4i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1f32.nxv16i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv16i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1f32_nxv16i8(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1f32.nxv16i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1f32_nxv16i8(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv16i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i64>, i64)
 declare void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
 
@@ -85730,80 +15445,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg6.nxv1f32.nxv8i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv8i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1f32_nxv8i16(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1f32.nxv8i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1f32_nxv8i16(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv8i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1f32.nxv4i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv4i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1f32_nxv4i8(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1f32.nxv4i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1f32_nxv4i8(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv4i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i16>, i64)
 declare void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
 
@@ -85841,228 +15482,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg6.nxv1f32.nxv2i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv2i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1f32_nxv2i32(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1f32.nxv2i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1f32_nxv2i32(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv2i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1f32.nxv8i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv8i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1f32_nxv8i8(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1f32.nxv8i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1f32_nxv8i8(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv8i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1f32.nxv4i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv4i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1f32_nxv4i64(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1f32.nxv4i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1f32_nxv4i64(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv4i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1f32.nxv64i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv64i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 64 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1f32_nxv64i8(<vscale x 1 x float> %val, float* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1f32.nxv64i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1f32_nxv64i8(<vscale x 1 x float> %val, float* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv64i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1f32.nxv4i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv4i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1f32_nxv4i16(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1f32.nxv4i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1f32_nxv4i16(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv4i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1f32.nxv8i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv8i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1f32_nxv8i64(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1f32.nxv8i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1f32_nxv8i64(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv8i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i8>, i64)
 declare void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)
 
@@ -86100,384 +15519,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg6.nxv1f32.nxv2i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv2i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1f32_nxv2i8(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1f32.nxv2i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1f32_nxv2i8(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv2i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1f32.nxv8i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv8i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1f32_nxv8i32(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1f32.nxv8i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1f32_nxv8i32(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv8i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1f32.nxv32i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv32i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1f32_nxv32i8(<vscale x 1 x float> %val, float* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1f32.nxv32i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1f32_nxv32i8(<vscale x 1 x float> %val, float* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv32i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1f32.nxv16i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv16i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1f32_nxv16i32(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1f32.nxv16i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1f32_nxv16i32(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv16i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1f32.nxv2i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv2i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1f32_nxv2i16(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1f32.nxv2i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1f32_nxv2i16(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv2i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv1f32.nxv2i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv2i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg6_nxv1f32_nxv2i64(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv1f32.nxv2i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv1f32_nxv2i64(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv2i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1f32.nxv16i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv16i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1f32_nxv16i16(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1f32.nxv16i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1f32_nxv16i16(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv16i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1f32.nxv32i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv32i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1f32_nxv32i16(<vscale x 1 x float> %val, float* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1f32.nxv32i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1f32_nxv32i16(<vscale x 1 x float> %val, float* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv32i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1f32.nxv4i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv4i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1f32_nxv4i32(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1f32.nxv4i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1f32_nxv4i32(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv4i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1f32.nxv16i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv16i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1f32_nxv16i8(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1f32.nxv16i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1f32_nxv16i8(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv16i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i64>, i64)
 declare void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
 
@@ -86556,84 +15597,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg7.nxv1f32.nxv8i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv8i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1f32_nxv8i16(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1f32.nxv8i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1f32_nxv8i16(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv8i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1f32.nxv4i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv4i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1f32_nxv4i8(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1f32.nxv4i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1f32_nxv4i8(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv4i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i16>, i64)
 declare void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
 
@@ -86673,240 +15636,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg7.nxv1f32.nxv2i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv2i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1f32_nxv2i32(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1f32.nxv2i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1f32_nxv2i32(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv2i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1f32.nxv8i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv8i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1f32_nxv8i8(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1f32.nxv8i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1f32_nxv8i8(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv8i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1f32.nxv4i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv4i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1f32_nxv4i64(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1f32.nxv4i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1f32_nxv4i64(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv4i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1f32.nxv64i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv64i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 64 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1f32_nxv64i8(<vscale x 1 x float> %val, float* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1f32.nxv64i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1f32_nxv64i8(<vscale x 1 x float> %val, float* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv64i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1f32.nxv4i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv4i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1f32_nxv4i16(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1f32.nxv4i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1f32_nxv4i16(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv4i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1f32.nxv8i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv8i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1f32_nxv8i64(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1f32.nxv8i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1f32_nxv8i64(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv8i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i8>, i64)
 declare void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)
 
@@ -86946,404 +15675,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg7.nxv1f32.nxv2i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv2i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1f32_nxv2i8(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1f32.nxv2i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1f32_nxv2i8(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv2i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1f32.nxv8i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv8i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1f32_nxv8i32(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1f32.nxv8i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1f32_nxv8i32(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv8i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1f32.nxv32i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv32i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1f32_nxv32i8(<vscale x 1 x float> %val, float* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1f32.nxv32i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1f32_nxv32i8(<vscale x 1 x float> %val, float* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv32i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1f32.nxv16i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv16i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1f32_nxv16i32(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1f32.nxv16i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1f32_nxv16i32(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv16i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1f32.nxv2i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv2i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1f32_nxv2i16(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1f32.nxv2i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1f32_nxv2i16(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv2i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv1f32.nxv2i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv2i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg7_nxv1f32_nxv2i64(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv1f32.nxv2i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv1f32_nxv2i64(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv2i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1f32.nxv16i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv16i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1f32_nxv16i16(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1f32.nxv16i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1f32_nxv16i16(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv16i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1f32.nxv32i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv32i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1f32_nxv32i16(<vscale x 1 x float> %val, float* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1f32.nxv32i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1f32_nxv32i16(<vscale x 1 x float> %val, float* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv32i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1f32.nxv4i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv4i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1f32_nxv4i32(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1f32.nxv4i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1f32_nxv4i32(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv4i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1f32.nxv16i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv16i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1f32_nxv16i8(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1f32.nxv16i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1f32_nxv16i8(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv16i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i64>, i64)
 declare void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
 
@@ -87426,88 +15757,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg8.nxv1f32.nxv8i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv8i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1f32_nxv8i16(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1f32.nxv8i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1f32_nxv8i16(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv8i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1f32.nxv4i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv4i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1f32_nxv4i8(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1f32.nxv4i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1f32_nxv4i8(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv4i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i16>, i64)
 declare void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
 
@@ -87549,252 +15798,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg8.nxv1f32.nxv2i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv2i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1f32_nxv2i32(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1f32.nxv2i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1f32_nxv2i32(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv2i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1f32.nxv8i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv8i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1f32_nxv8i8(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1f32.nxv8i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1f32_nxv8i8(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv8i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1f32.nxv4i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv4i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1f32_nxv4i64(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1f32.nxv4i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1f32_nxv4i64(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv4i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1f32.nxv64i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv64i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 64 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1f32_nxv64i8(<vscale x 1 x float> %val, float* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1f32.nxv64i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1f32_nxv64i8(<vscale x 1 x float> %val, float* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv64i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1f32.nxv4i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv4i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1f32_nxv4i16(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1f32.nxv4i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1f32_nxv4i16(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv4i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1f32.nxv8i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv8i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1f32_nxv8i64(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1f32.nxv8i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1f32_nxv8i64(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv8i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i8>, i64)
 declare void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)
 
@@ -87836,434 +15839,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg8.nxv1f32.nxv2i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv2i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1f32_nxv2i8(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1f32.nxv2i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1f32_nxv2i8(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv2i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1f32.nxv8i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv8i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1f32_nxv8i32(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1f32.nxv8i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1f32_nxv8i32(<vscale x 1 x float> %val, float* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv8i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1f32.nxv32i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv32i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i8>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1f32_nxv32i8(<vscale x 1 x float> %val, float* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1f32.nxv32i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1f32_nxv32i8(<vscale x 1 x float> %val, float* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv32i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1f32.nxv16i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv16i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i32>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1f32_nxv16i32(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1f32.nxv16i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1f32_nxv16i32(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv16i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1f32.nxv2i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv2i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1f32_nxv2i16(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1f32.nxv2i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1f32_nxv2i16(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv2i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv1f32.nxv2i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv2i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
-
-define void @test_vsuxseg8_nxv1f32_nxv2i64(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv1f32.nxv2i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv1f32_nxv2i64(<vscale x 1 x float> %val, float* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv2i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8f16.nxv16i16(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv16i16(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 16 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8f16_nxv16i16(<vscale x 8 x half> %val, half* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8f16.nxv16i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8f16_nxv16i16(<vscale x 8 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv16i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8f16.nxv32i16(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv32i16(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 32 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8f16_nxv32i16(<vscale x 8 x half> %val, half* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8f16.nxv32i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8f16_nxv32i16(<vscale x 8 x half> %val, half* %base, <vscale x 32 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv32i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 32 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8f16.nxv4i32(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv4i32(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 4 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8f16_nxv4i32(<vscale x 8 x half> %val, half* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v26, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v26
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8f16.nxv4i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8f16_nxv4i32(<vscale x 8 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v26, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v26, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv4i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8f16.nxv16i8(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv16i8(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 16 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8f16_nxv16i8(<vscale x 8 x half> %val, half* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v26, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v26
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8f16.nxv16i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8f16_nxv16i8(<vscale x 8 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v26, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v26, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv16i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8f16.nxv1i64(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv1i64(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 1 x i64>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8f16_nxv1i64(<vscale x 8 x half> %val, half* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8f16.nxv1i64(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8f16_nxv1i64(<vscale x 8 x half> %val, half* %base, <vscale x 1 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv1i64(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 1 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8f16.nxv1i32(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv1i32(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 1 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8f16_nxv1i32(<vscale x 8 x half> %val, half* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8f16.nxv1i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8f16_nxv1i32(<vscale x 8 x half> %val, half* %base, <vscale x 1 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv1i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 1 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i16(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 8 x i16>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i16(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 8 x i16>, <vscale x 8 x i1>, i64)
 
@@ -88295,99 +15870,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv8f16.nxv4i8(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv4i8(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 4 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8f16_nxv4i8(<vscale x 8 x half> %val, half* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8f16.nxv4i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8f16_nxv4i8(<vscale x 8 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv4i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8f16.nxv1i16(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv1i16(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 1 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8f16_nxv1i16(<vscale x 8 x half> %val, half* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8f16.nxv1i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8f16_nxv1i16(<vscale x 8 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv1i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8f16.nxv2i32(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv2i32(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 2 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8f16_nxv2i32(<vscale x 8 x half> %val, half* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8f16.nxv2i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8f16_nxv2i32(<vscale x 8 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv2i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i8(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 8 x i8>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i8(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 8 x i8>, <vscale x 8 x i1>, i64)
 
@@ -88419,95 +15901,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv8f16.nxv4i64(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv4i64(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 4 x i64>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8f16_nxv4i64(<vscale x 8 x half> %val, half* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8f16.nxv4i64(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8f16_nxv4i64(<vscale x 8 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv4i64(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8f16.nxv64i8(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv64i8(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 64 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8f16_nxv64i8(<vscale x 8 x half> %val, half* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8f16.nxv64i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8f16_nxv64i8(<vscale x 8 x half> %val, half* %base, <vscale x 64 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv64i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 64 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8f16.nxv4i16(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv4i16(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 4 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8f16_nxv4i16(<vscale x 8 x half> %val, half* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8f16.nxv4i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8f16_nxv4i16(<vscale x 8 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv4i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i64(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 8 x i64>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i64(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 8 x i64>, <vscale x 8 x i1>, i64)
 
@@ -88537,68 +15930,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv8f16.nxv1i8(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv1i8(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 1 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8f16_nxv1i8(<vscale x 8 x half> %val, half* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8f16.nxv1i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8f16_nxv1i8(<vscale x 8 x half> %val, half* %base, <vscale x 1 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv1i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 1 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8f16.nxv2i8(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv2i8(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 2 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8f16_nxv2i8(<vscale x 8 x half> %val, half* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8f16.nxv2i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8f16_nxv2i8(<vscale x 8 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv2i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i32(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 8 x i32>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i32(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 8 x i32>, <vscale x 8 x i1>, i64)
 
@@ -88628,312 +15959,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv8f16.nxv32i8(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv32i8(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 32 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8f16_nxv32i8(<vscale x 8 x half> %val, half* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8f16.nxv32i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8f16_nxv32i8(<vscale x 8 x half> %val, half* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv32i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8f16.nxv16i32(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv16i32(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 16 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8f16_nxv16i32(<vscale x 8 x half> %val, half* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8f16.nxv16i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8f16_nxv16i32(<vscale x 8 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv16i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8f16.nxv2i16(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv2i16(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 2 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8f16_nxv2i16(<vscale x 8 x half> %val, half* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8f16.nxv2i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8f16_nxv2i16(<vscale x 8 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv2i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8f16.nxv2i64(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv2i64(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 2 x i64>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8f16_nxv2i64(<vscale x 8 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v26, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v26
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8f16.nxv2i64(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8f16_nxv2i64(<vscale x 8 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v26, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v26, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv2i64(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv8f16.nxv16i16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv16i16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 16 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg3_nxv8f16_nxv16i16(<vscale x 8 x half> %val, half* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv16i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv8f16_nxv16i16(<vscale x 8 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v2, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv16i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv8f16.nxv32i16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv32i16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 32 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg3_nxv8f16_nxv32i16(<vscale x 8 x half> %val, half* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv32i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv8f16_nxv32i16(<vscale x 8 x half> %val, half* %base, <vscale x 32 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv32i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 32 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv8f16.nxv4i32(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv4i32(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 4 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg3_nxv8f16_nxv4i32(<vscale x 8 x half> %val, half* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv4i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv8f16_nxv4i32(<vscale x 8 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv4i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv8f16.nxv16i8(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv16i8(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 16 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg3_nxv8f16_nxv16i8(<vscale x 8 x half> %val, half* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv16i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv8f16_nxv16i8(<vscale x 8 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv16i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv8f16.nxv1i64(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv1i64(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 1 x i64>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg3_nxv8f16_nxv1i64(<vscale x 8 x half> %val, half* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv1i64(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv8f16_nxv1i64(<vscale x 8 x half> %val, half* %base, <vscale x 1 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv1i64(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 1 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv8f16.nxv1i32(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv1i32(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 1 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg3_nxv8f16_nxv1i32(<vscale x 8 x half> %val, half* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv1i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv8f16_nxv1i32(<vscale x 8 x half> %val, half* %base, <vscale x 1 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv1i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 1 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 8 x i16>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 8 x i16>, <vscale x 8 x i1>, i64)
 
@@ -88965,99 +15990,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv8f16.nxv4i8(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv4i8(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 4 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg3_nxv8f16_nxv4i8(<vscale x 8 x half> %val, half* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv4i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv8f16_nxv4i8(<vscale x 8 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv4i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv8f16.nxv1i16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv1i16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 1 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg3_nxv8f16_nxv1i16(<vscale x 8 x half> %val, half* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv1i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv8f16_nxv1i16(<vscale x 8 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv1i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv8f16.nxv2i32(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv2i32(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 2 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg3_nxv8f16_nxv2i32(<vscale x 8 x half> %val, half* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv2i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv8f16_nxv2i32(<vscale x 8 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv2i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i8(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 8 x i8>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i8(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 8 x i8>, <vscale x 8 x i1>, i64)
 
@@ -89089,99 +16021,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv8f16.nxv4i64(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv4i64(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 4 x i64>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg3_nxv8f16_nxv4i64(<vscale x 8 x half> %val, half* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv4i64(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv8f16_nxv4i64(<vscale x 8 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v2, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv4i64(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv8f16.nxv64i8(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv64i8(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 64 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg3_nxv8f16_nxv64i8(<vscale x 8 x half> %val, half* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv64i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv8f16_nxv64i8(<vscale x 8 x half> %val, half* %base, <vscale x 64 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv64i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 64 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv8f16.nxv4i16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv4i16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 4 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg3_nxv8f16_nxv4i16(<vscale x 8 x half> %val, half* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv4i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv8f16_nxv4i16(<vscale x 8 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv4i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i64(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 8 x i64>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i64(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 8 x i64>, <vscale x 8 x i1>, i64)
 
@@ -89213,68 +16052,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv8f16.nxv1i8(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv1i8(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 1 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg3_nxv8f16_nxv1i8(<vscale x 8 x half> %val, half* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv1i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv8f16_nxv1i8(<vscale x 8 x half> %val, half* %base, <vscale x 1 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv1i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 1 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv8f16.nxv2i8(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv2i8(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 2 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg3_nxv8f16_nxv2i8(<vscale x 8 x half> %val, half* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv2i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv8f16_nxv2i8(<vscale x 8 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv2i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i32(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 8 x i32>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i32(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 8 x i32>, <vscale x 8 x i1>, i64)
 
@@ -89306,328 +16083,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv8f16.nxv32i8(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv32i8(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 32 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg3_nxv8f16_nxv32i8(<vscale x 8 x half> %val, half* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv32i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv8f16_nxv32i8(<vscale x 8 x half> %val, half* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v2, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv32i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv8f16.nxv16i32(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv16i32(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 16 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg3_nxv8f16_nxv16i32(<vscale x 8 x half> %val, half* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv16i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv8f16_nxv16i32(<vscale x 8 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv16i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv8f16.nxv2i16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv2i16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 2 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg3_nxv8f16_nxv2i16(<vscale x 8 x half> %val, half* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv2i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv8f16_nxv2i16(<vscale x 8 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv2i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv8f16.nxv2i64(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv2i64(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 2 x i64>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg3_nxv8f16_nxv2i64(<vscale x 8 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv2i64(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv8f16_nxv2i64(<vscale x 8 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv2i64(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv8f16.nxv16i16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv16i16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 16 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg4_nxv8f16_nxv16i16(<vscale x 8 x half> %val, half* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv8f16.nxv16i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv8f16_nxv16i16(<vscale x 8 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v2, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv16i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv8f16.nxv32i16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv32i16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 32 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg4_nxv8f16_nxv32i16(<vscale x 8 x half> %val, half* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv8f16.nxv32i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv8f16_nxv32i16(<vscale x 8 x half> %val, half* %base, <vscale x 32 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv32i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 32 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv8f16.nxv4i32(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv4i32(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 4 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg4_nxv8f16_nxv4i32(<vscale x 8 x half> %val, half* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv8f16.nxv4i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv8f16_nxv4i32(<vscale x 8 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv4i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv8f16.nxv16i8(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv16i8(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 16 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg4_nxv8f16_nxv16i8(<vscale x 8 x half> %val, half* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv8f16.nxv16i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv8f16_nxv16i8(<vscale x 8 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv16i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv8f16.nxv1i64(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv1i64(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 1 x i64>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg4_nxv8f16_nxv1i64(<vscale x 8 x half> %val, half* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv8f16.nxv1i64(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv8f16_nxv1i64(<vscale x 8 x half> %val, half* %base, <vscale x 1 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv1i64(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 1 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv8f16.nxv1i32(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv1i32(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 1 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg4_nxv8f16_nxv1i32(<vscale x 8 x half> %val, half* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv8f16.nxv1i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv8f16_nxv1i32(<vscale x 8 x half> %val, half* %base, <vscale x 1 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv1i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 1 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 8 x i16>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 8 x i16>, <vscale x 8 x i1>, i64)
 
@@ -89661,105 +16116,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv8f16.nxv4i8(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv4i8(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 4 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg4_nxv8f16_nxv4i8(<vscale x 8 x half> %val, half* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv8f16.nxv4i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv8f16_nxv4i8(<vscale x 8 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv4i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv8f16.nxv1i16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv1i16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 1 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg4_nxv8f16_nxv1i16(<vscale x 8 x half> %val, half* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv8f16.nxv1i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv8f16_nxv1i16(<vscale x 8 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv1i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv8f16.nxv2i32(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv2i32(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 2 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg4_nxv8f16_nxv2i32(<vscale x 8 x half> %val, half* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv8f16.nxv2i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv8f16_nxv2i32(<vscale x 8 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv2i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i8(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 8 x i8>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i8(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 8 x i8>, <vscale x 8 x i1>, i64)
 
@@ -89793,105 +16149,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv8f16.nxv4i64(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv4i64(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 4 x i64>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg4_nxv8f16_nxv4i64(<vscale x 8 x half> %val, half* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv8f16.nxv4i64(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv8f16_nxv4i64(<vscale x 8 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v2, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv4i64(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv8f16.nxv64i8(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv64i8(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 64 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg4_nxv8f16_nxv64i8(<vscale x 8 x half> %val, half* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv8f16.nxv64i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv8f16_nxv64i8(<vscale x 8 x half> %val, half* %base, <vscale x 64 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv64i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 64 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv8f16.nxv4i16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv4i16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 4 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg4_nxv8f16_nxv4i16(<vscale x 8 x half> %val, half* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv8f16.nxv4i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv8f16_nxv4i16(<vscale x 8 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv4i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i64(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 8 x i64>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i64(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 8 x i64>, <vscale x 8 x i1>, i64)
 
@@ -89925,72 +16182,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv8f16.nxv1i8(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv1i8(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 1 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg4_nxv8f16_nxv1i8(<vscale x 8 x half> %val, half* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv8f16.nxv1i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv8f16_nxv1i8(<vscale x 8 x half> %val, half* %base, <vscale x 1 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv1i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 1 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv8f16.nxv2i8(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv2i8(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 2 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg4_nxv8f16_nxv2i8(<vscale x 8 x half> %val, half* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv8f16.nxv2i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv8f16_nxv2i8(<vscale x 8 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv2i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i32(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 8 x i32>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i32(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 8 x i32>, <vscale x 8 x i1>, i64)
 
@@ -90024,322 +16215,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv8f16.nxv32i8(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv32i8(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 32 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg4_nxv8f16_nxv32i8(<vscale x 8 x half> %val, half* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv8f16.nxv32i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv8f16_nxv32i8(<vscale x 8 x half> %val, half* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v2, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv32i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv8f16.nxv16i32(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv16i32(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 16 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg4_nxv8f16_nxv16i32(<vscale x 8 x half> %val, half* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv8f16.nxv16i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv8f16_nxv16i32(<vscale x 8 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv16i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv8f16.nxv2i16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv2i16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 2 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg4_nxv8f16_nxv2i16(<vscale x 8 x half> %val, half* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv8f16.nxv2i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv8f16_nxv2i16(<vscale x 8 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv2i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv8f16.nxv2i64(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv2i64(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 2 x i64>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg4_nxv8f16_nxv2i64(<vscale x 8 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv8f16.nxv2i64(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv8f16_nxv2i64(<vscale x 8 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv2i64(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8f32.nxv16i16(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv16i16(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 16 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8f32_nxv16i16(<vscale x 8 x float> %val, float* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v28, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v28
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8f32.nxv16i16(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8f32_nxv16i16(<vscale x 8 x float> %val, float* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v28, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v28, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv16i16(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8f32.nxv32i16(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv32i16(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 32 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8f32_nxv32i16(<vscale x 8 x float> %val, float* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8f32.nxv32i16(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8f32_nxv32i16(<vscale x 8 x float> %val, float* %base, <vscale x 32 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv32i16(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 32 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8f32.nxv4i32(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv4i32(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 4 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8f32_nxv4i32(<vscale x 8 x float> %val, float* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv2r.v v26, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v26
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8f32.nxv4i32(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8f32_nxv4i32(<vscale x 8 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv2r.v v26, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v26, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv4i32(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8f32.nxv16i8(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv16i8(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 16 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8f32_nxv16i8(<vscale x 8 x float> %val, float* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv2r.v v26, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v26
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8f32.nxv16i8(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8f32_nxv16i8(<vscale x 8 x float> %val, float* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv2r.v v26, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v26, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv16i8(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8f32.nxv1i64(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv1i64(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 1 x i64>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8f32_nxv1i64(<vscale x 8 x float> %val, float* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8f32.nxv1i64(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8f32_nxv1i64(<vscale x 8 x float> %val, float* %base, <vscale x 1 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv1i64(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 1 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8f32.nxv1i32(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv1i32(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 1 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8f32_nxv1i32(<vscale x 8 x float> %val, float* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8f32.nxv1i32(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8f32_nxv1i32(<vscale x 8 x float> %val, float* %base, <vscale x 1 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv1i32(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 1 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i16(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 8 x i16>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i16(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 8 x i16>, <vscale x 8 x i1>, i64)
 
@@ -90371,99 +16246,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv8f32.nxv4i8(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv4i8(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 4 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8f32_nxv4i8(<vscale x 8 x float> %val, float* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8f32.nxv4i8(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8f32_nxv4i8(<vscale x 8 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv4i8(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8f32.nxv1i16(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv1i16(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 1 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8f32_nxv1i16(<vscale x 8 x float> %val, float* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8f32.nxv1i16(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8f32_nxv1i16(<vscale x 8 x float> %val, float* %base, <vscale x 1 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv1i16(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 1 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8f32.nxv2i32(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv2i32(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 2 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8f32_nxv2i32(<vscale x 8 x float> %val, float* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8f32.nxv2i32(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8f32_nxv2i32(<vscale x 8 x float> %val, float* %base, <vscale x 2 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv2i32(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 2 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i8(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 8 x i8>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i8(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 8 x i8>, <vscale x 8 x i1>, i64)
 
@@ -90495,97 +16277,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv8f32.nxv4i64(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv4i64(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 4 x i64>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8f32_nxv4i64(<vscale x 8 x float> %val, float* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v28, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v28
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8f32.nxv4i64(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8f32_nxv4i64(<vscale x 8 x float> %val, float* %base, <vscale x 4 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v28, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v28, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv4i64(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 4 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8f32.nxv64i8(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv64i8(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 64 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8f32_nxv64i8(<vscale x 8 x float> %val, float* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8f32.nxv64i8(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8f32_nxv64i8(<vscale x 8 x float> %val, float* %base, <vscale x 64 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv64i8(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 64 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8f32.nxv4i16(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv4i16(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 4 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8f32_nxv4i16(<vscale x 8 x float> %val, float* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8f32.nxv4i16(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8f32_nxv4i16(<vscale x 8 x float> %val, float* %base, <vscale x 4 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv4i16(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 4 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i64(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 8 x i64>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i64(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 8 x i64>, <vscale x 8 x i1>, i64)
 
@@ -90615,68 +16306,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv8f32.nxv1i8(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv1i8(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 1 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8f32_nxv1i8(<vscale x 8 x float> %val, float* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8f32.nxv1i8(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8f32_nxv1i8(<vscale x 8 x float> %val, float* %base, <vscale x 1 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv1i8(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 1 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8f32.nxv2i8(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv2i8(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 2 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8f32_nxv2i8(<vscale x 8 x float> %val, float* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8f32.nxv2i8(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8f32_nxv2i8(<vscale x 8 x float> %val, float* %base, <vscale x 2 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv2i8(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 2 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i32(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 8 x i32>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i32(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 8 x i32>, <vscale x 8 x i1>, i64)
 
@@ -90708,403 +16337,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv8f32.nxv32i8(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv32i8(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 32 x i8>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8f32_nxv32i8(<vscale x 8 x float> %val, float* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v28, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v28
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8f32.nxv32i8(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8f32_nxv32i8(<vscale x 8 x float> %val, float* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v28, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v28, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv32i8(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8f32.nxv16i32(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv16i32(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 16 x i32>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8f32_nxv16i32(<vscale x 8 x float> %val, float* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8f32.nxv16i32(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8f32_nxv16i32(<vscale x 8 x float> %val, float* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv16i32(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8f32.nxv2i16(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv2i16(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 2 x i16>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8f32_nxv2i16(<vscale x 8 x float> %val, float* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8f32.nxv2i16(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8f32_nxv2i16(<vscale x 8 x float> %val, float* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv1r.v v25, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv2i16(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv8f32.nxv2i64(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv2i64(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 2 x i64>, <vscale x 8 x i1>, i64)
-
-define void @test_vsuxseg2_nxv8f32_nxv2i64(<vscale x 8 x float> %val, float* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv2r.v v26, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v26
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv8f32.nxv2i64(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv8f32_nxv2i64(<vscale x 8 x float> %val, float* %base, <vscale x 2 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
-; CHECK-NEXT:    vmv2r.v v26, v12
-; CHECK-NEXT:    vmv4r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v26, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv2i64(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 2 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2f64.nxv16i16(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv16i16(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 16 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2f64_nxv16i16(<vscale x 2 x double> %val, double* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2f64.nxv16i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2f64_nxv16i16(<vscale x 2 x double> %val, double* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv16i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2f64.nxv32i16(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv32i16(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 32 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2f64_nxv32i16(<vscale x 2 x double> %val, double* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2f64.nxv32i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2f64_nxv32i16(<vscale x 2 x double> %val, double* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv32i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2f64.nxv4i32(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv4i32(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 4 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2f64_nxv4i32(<vscale x 2 x double> %val, double* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v26, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v26
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2f64.nxv4i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2f64_nxv4i32(<vscale x 2 x double> %val, double* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v26, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v26, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv4i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2f64.nxv16i8(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv16i8(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 16 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2f64_nxv16i8(<vscale x 2 x double> %val, double* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v26, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v26
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2f64.nxv16i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2f64_nxv16i8(<vscale x 2 x double> %val, double* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v26, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v26, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv16i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2f64.nxv1i64(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv1i64(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2f64_nxv1i64(<vscale x 2 x double> %val, double* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2f64.nxv1i64(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2f64_nxv1i64(<vscale x 2 x double> %val, double* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv1i64(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2f64.nxv1i32(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv1i32(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2f64_nxv1i32(<vscale x 2 x double> %val, double* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2f64.nxv1i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2f64_nxv1i32(<vscale x 2 x double> %val, double* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv1i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2f64.nxv8i16(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv8i16(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 8 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2f64_nxv8i16(<vscale x 2 x double> %val, double* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v26, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v26
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2f64.nxv8i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2f64_nxv8i16(<vscale x 2 x double> %val, double* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v26, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v26, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv8i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2f64.nxv4i8(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv4i8(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 4 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2f64_nxv4i8(<vscale x 2 x double> %val, double* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2f64.nxv4i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2f64_nxv4i8(<vscale x 2 x double> %val, double* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv4i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2f64.nxv1i16(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv1i16(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2f64_nxv1i16(<vscale x 2 x double> %val, double* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2f64.nxv1i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2f64_nxv1i16(<vscale x 2 x double> %val, double* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv1i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i32(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i32>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i32(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
 
@@ -91136,186 +16368,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv2f64.nxv8i8(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv8i8(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 8 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2f64_nxv8i8(<vscale x 2 x double> %val, double* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2f64.nxv8i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2f64_nxv8i8(<vscale x 2 x double> %val, double* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv8i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2f64.nxv4i64(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv4i64(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 4 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2f64_nxv4i64(<vscale x 2 x double> %val, double* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2f64.nxv4i64(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2f64_nxv4i64(<vscale x 2 x double> %val, double* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv4i64(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2f64.nxv64i8(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv64i8(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 64 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2f64_nxv64i8(<vscale x 2 x double> %val, double* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2f64.nxv64i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2f64_nxv64i8(<vscale x 2 x double> %val, double* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv64i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2f64.nxv4i16(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv4i16(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 4 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2f64_nxv4i16(<vscale x 2 x double> %val, double* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2f64.nxv4i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2f64_nxv4i16(<vscale x 2 x double> %val, double* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv4i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2f64.nxv8i64(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv8i64(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 8 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2f64_nxv8i64(<vscale x 2 x double> %val, double* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2f64.nxv8i64(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2f64_nxv8i64(<vscale x 2 x double> %val, double* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv8i64(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2f64.nxv1i8(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv1i8(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2f64_nxv1i8(<vscale x 2 x double> %val, double* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2f64.nxv1i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2f64_nxv1i8(<vscale x 2 x double> %val, double* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv1i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i8(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i8>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i8(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i8>, <vscale x 2 x i1>, i64)
 
@@ -91347,93 +16399,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv2f64.nxv8i32(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv8i32(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 8 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2f64_nxv8i32(<vscale x 2 x double> %val, double* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2f64.nxv8i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2f64_nxv8i32(<vscale x 2 x double> %val, double* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv8i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2f64.nxv32i8(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv32i8(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 32 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2f64_nxv32i8(<vscale x 2 x double> %val, double* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2f64.nxv32i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2f64_nxv32i8(<vscale x 2 x double> %val, double* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv32i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2f64.nxv16i32(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv16i32(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 16 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2f64_nxv16i32(<vscale x 2 x double> %val, double* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2f64.nxv16i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2f64_nxv16i32(<vscale x 2 x double> %val, double* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv16i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i16(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i16>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i16(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64)
 
@@ -91496,285 +16461,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv2f64.nxv16i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv16i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 16 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2f64_nxv16i16(<vscale x 2 x double> %val, double* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2f64.nxv16i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2f64_nxv16i16(<vscale x 2 x double> %val, double* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v2, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv16i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2f64.nxv32i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv32i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 32 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2f64_nxv32i16(<vscale x 2 x double> %val, double* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2f64.nxv32i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2f64_nxv32i16(<vscale x 2 x double> %val, double* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv32i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2f64.nxv4i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv4i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 4 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2f64_nxv4i32(<vscale x 2 x double> %val, double* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2f64.nxv4i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2f64_nxv4i32(<vscale x 2 x double> %val, double* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv4i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2f64.nxv16i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv16i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 16 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2f64_nxv16i8(<vscale x 2 x double> %val, double* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2f64.nxv16i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2f64_nxv16i8(<vscale x 2 x double> %val, double* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv16i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2f64.nxv1i64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv1i64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2f64_nxv1i64(<vscale x 2 x double> %val, double* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2f64.nxv1i64(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2f64_nxv1i64(<vscale x 2 x double> %val, double* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv1i64(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2f64.nxv1i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv1i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2f64_nxv1i32(<vscale x 2 x double> %val, double* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2f64.nxv1i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2f64_nxv1i32(<vscale x 2 x double> %val, double* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv1i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2f64.nxv8i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv8i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 8 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2f64_nxv8i16(<vscale x 2 x double> %val, double* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2f64.nxv8i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2f64_nxv8i16(<vscale x 2 x double> %val, double* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv8i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2f64.nxv4i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv4i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 4 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2f64_nxv4i8(<vscale x 2 x double> %val, double* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2f64.nxv4i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2f64_nxv4i8(<vscale x 2 x double> %val, double* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv4i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2f64.nxv1i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv1i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2f64_nxv1i16(<vscale x 2 x double> %val, double* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2f64.nxv1i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2f64_nxv1i16(<vscale x 2 x double> %val, double* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv1i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i32>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
 
@@ -91806,192 +16492,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv2f64.nxv8i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv8i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 8 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2f64_nxv8i8(<vscale x 2 x double> %val, double* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2f64.nxv8i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2f64_nxv8i8(<vscale x 2 x double> %val, double* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv8i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2f64.nxv4i64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv4i64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 4 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2f64_nxv4i64(<vscale x 2 x double> %val, double* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2f64.nxv4i64(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2f64_nxv4i64(<vscale x 2 x double> %val, double* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v2, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv4i64(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2f64.nxv64i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv64i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 64 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2f64_nxv64i8(<vscale x 2 x double> %val, double* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2f64.nxv64i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2f64_nxv64i8(<vscale x 2 x double> %val, double* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv64i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2f64.nxv4i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv4i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 4 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2f64_nxv4i16(<vscale x 2 x double> %val, double* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2f64.nxv4i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2f64_nxv4i16(<vscale x 2 x double> %val, double* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv4i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2f64.nxv8i64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv8i64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 8 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2f64_nxv8i64(<vscale x 2 x double> %val, double* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2f64.nxv8i64(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2f64_nxv8i64(<vscale x 2 x double> %val, double* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv8i64(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2f64.nxv1i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv1i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2f64_nxv1i8(<vscale x 2 x double> %val, double* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2f64.nxv1i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2f64_nxv1i8(<vscale x 2 x double> %val, double* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv1i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i8>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i8>, <vscale x 2 x i1>, i64)
 
@@ -92023,99 +16523,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv2f64.nxv8i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv8i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 8 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2f64_nxv8i32(<vscale x 2 x double> %val, double* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2f64.nxv8i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2f64_nxv8i32(<vscale x 2 x double> %val, double* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v2, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv8i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2f64.nxv32i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv32i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 32 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2f64_nxv32i8(<vscale x 2 x double> %val, double* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2f64.nxv32i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2f64_nxv32i8(<vscale x 2 x double> %val, double* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v2, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv32i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2f64.nxv16i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv16i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 16 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2f64_nxv16i32(<vscale x 2 x double> %val, double* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2f64.nxv16i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2f64_nxv16i32(<vscale x 2 x double> %val, double* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv16i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i16>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64)
 
@@ -92178,303 +16585,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv2f64.nxv16i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv16i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 16 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2f64_nxv16i16(<vscale x 2 x double> %val, double* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2f64.nxv16i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2f64_nxv16i16(<vscale x 2 x double> %val, double* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v2, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv16i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2f64.nxv32i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv32i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 32 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2f64_nxv32i16(<vscale x 2 x double> %val, double* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2f64.nxv32i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2f64_nxv32i16(<vscale x 2 x double> %val, double* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv32i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2f64.nxv4i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv4i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 4 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2f64_nxv4i32(<vscale x 2 x double> %val, double* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2f64.nxv4i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2f64_nxv4i32(<vscale x 2 x double> %val, double* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv4i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2f64.nxv16i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv16i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 16 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2f64_nxv16i8(<vscale x 2 x double> %val, double* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2f64.nxv16i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2f64_nxv16i8(<vscale x 2 x double> %val, double* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv16i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2f64.nxv1i64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv1i64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2f64_nxv1i64(<vscale x 2 x double> %val, double* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2f64.nxv1i64(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2f64_nxv1i64(<vscale x 2 x double> %val, double* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv1i64(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2f64.nxv1i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv1i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2f64_nxv1i32(<vscale x 2 x double> %val, double* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2f64.nxv1i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2f64_nxv1i32(<vscale x 2 x double> %val, double* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv1i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2f64.nxv8i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv8i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 8 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2f64_nxv8i16(<vscale x 2 x double> %val, double* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2f64.nxv8i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2f64_nxv8i16(<vscale x 2 x double> %val, double* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv8i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2f64.nxv4i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv4i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 4 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2f64_nxv4i8(<vscale x 2 x double> %val, double* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2f64.nxv4i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2f64_nxv4i8(<vscale x 2 x double> %val, double* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv4i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2f64.nxv1i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv1i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2f64_nxv1i16(<vscale x 2 x double> %val, double* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2f64.nxv1i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2f64_nxv1i16(<vscale x 2 x double> %val, double* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv1i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i32>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
 
@@ -92508,204 +16618,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv2f64.nxv8i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv8i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 8 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2f64_nxv8i8(<vscale x 2 x double> %val, double* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2f64.nxv8i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2f64_nxv8i8(<vscale x 2 x double> %val, double* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv8i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2f64.nxv4i64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv4i64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 4 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2f64_nxv4i64(<vscale x 2 x double> %val, double* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2f64.nxv4i64(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2f64_nxv4i64(<vscale x 2 x double> %val, double* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v2, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv4i64(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2f64.nxv64i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv64i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 64 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2f64_nxv64i8(<vscale x 2 x double> %val, double* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2f64.nxv64i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2f64_nxv64i8(<vscale x 2 x double> %val, double* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv64i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2f64.nxv4i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv4i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 4 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2f64_nxv4i16(<vscale x 2 x double> %val, double* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2f64.nxv4i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2f64_nxv4i16(<vscale x 2 x double> %val, double* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv4i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2f64.nxv8i64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv8i64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 8 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2f64_nxv8i64(<vscale x 2 x double> %val, double* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2f64.nxv8i64(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2f64_nxv8i64(<vscale x 2 x double> %val, double* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv8i64(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2f64.nxv1i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv1i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2f64_nxv1i8(<vscale x 2 x double> %val, double* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2f64.nxv1i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2f64_nxv1i8(<vscale x 2 x double> %val, double* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv1i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i8>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i8>, <vscale x 2 x i1>, i64)
 
@@ -92739,105 +16651,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv2f64.nxv8i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv8i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 8 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2f64_nxv8i32(<vscale x 2 x double> %val, double* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2f64.nxv8i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2f64_nxv8i32(<vscale x 2 x double> %val, double* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v2, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv8i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2f64.nxv32i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv32i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 32 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2f64_nxv32i8(<vscale x 2 x double> %val, double* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2f64.nxv32i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2f64_nxv32i8(<vscale x 2 x double> %val, double* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v2, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv32i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2f64.nxv16i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv16i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 16 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2f64_nxv16i32(<vscale x 2 x double> %val, double* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2f64.nxv16i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2f64_nxv16i32(<vscale x 2 x double> %val, double* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv16i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i16>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64)
 
@@ -92904,64 +16717,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv4f16.nxv16i16(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv16i16(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4f16_nxv16i16(<vscale x 4 x half> %val, half* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4f16.nxv16i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4f16_nxv16i16(<vscale x 4 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv16i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4f16.nxv32i16(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv32i16(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 32 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4f16_nxv32i16(<vscale x 4 x half> %val, half* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4f16.nxv32i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4f16_nxv32i16(<vscale x 4 x half> %val, half* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv32i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i32(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i32>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i32(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i32>, <vscale x 4 x i1>, i64)
 
@@ -92991,126 +16746,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv4f16.nxv16i8(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv16i8(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4f16_nxv16i8(<vscale x 4 x half> %val, half* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4f16.nxv16i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4f16_nxv16i8(<vscale x 4 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv16i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4f16.nxv1i64(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv1i64(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4f16_nxv1i64(<vscale x 4 x half> %val, half* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4f16.nxv1i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4f16_nxv1i64(<vscale x 4 x half> %val, half* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv1i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4f16.nxv1i32(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv1i32(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4f16_nxv1i32(<vscale x 4 x half> %val, half* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4f16.nxv1i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4f16_nxv1i32(<vscale x 4 x half> %val, half* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv1i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4f16.nxv8i16(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv8i16(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4f16_nxv8i16(<vscale x 4 x half> %val, half* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4f16.nxv8i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4f16_nxv8i16(<vscale x 4 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv8i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i8(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i8>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i8(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i8>, <vscale x 4 x i1>, i64)
 
@@ -93142,99 +16777,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv4f16.nxv1i16(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv1i16(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4f16_nxv1i16(<vscale x 4 x half> %val, half* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4f16.nxv1i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4f16_nxv1i16(<vscale x 4 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv1i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4f16.nxv2i32(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv2i32(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4f16_nxv2i32(<vscale x 4 x half> %val, half* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4f16.nxv2i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4f16_nxv2i32(<vscale x 4 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv2i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4f16.nxv8i8(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv8i8(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4f16_nxv8i8(<vscale x 4 x half> %val, half* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4f16.nxv8i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4f16_nxv8i8(<vscale x 4 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv8i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i64(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i64>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i64(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i64>, <vscale x 4 x i1>, i64)
 
@@ -93264,35 +16806,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv4f16.nxv64i8(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv64i8(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 64 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4f16_nxv64i8(<vscale x 4 x half> %val, half* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4f16.nxv64i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4f16_nxv64i8(<vscale x 4 x half> %val, half* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv64i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i16(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i16>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i16(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i16>, <vscale x 4 x i1>, i64)
 
@@ -93324,306 +16837,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv4f16.nxv8i64(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv8i64(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4f16_nxv8i64(<vscale x 4 x half> %val, half* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4f16.nxv8i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4f16_nxv8i64(<vscale x 4 x half> %val, half* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv8i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4f16.nxv1i8(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv1i8(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4f16_nxv1i8(<vscale x 4 x half> %val, half* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4f16.nxv1i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4f16_nxv1i8(<vscale x 4 x half> %val, half* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv1i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4f16.nxv2i8(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv2i8(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4f16_nxv2i8(<vscale x 4 x half> %val, half* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4f16.nxv2i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4f16_nxv2i8(<vscale x 4 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv2i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4f16.nxv8i32(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv8i32(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4f16_nxv8i32(<vscale x 4 x half> %val, half* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4f16.nxv8i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4f16_nxv8i32(<vscale x 4 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv8i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4f16.nxv32i8(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv32i8(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 32 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4f16_nxv32i8(<vscale x 4 x half> %val, half* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4f16.nxv32i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4f16_nxv32i8(<vscale x 4 x half> %val, half* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv32i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4f16.nxv16i32(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv16i32(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4f16_nxv16i32(<vscale x 4 x half> %val, half* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4f16.nxv16i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4f16_nxv16i32(<vscale x 4 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv16i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4f16.nxv2i16(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv2i16(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4f16_nxv2i16(<vscale x 4 x half> %val, half* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4f16.nxv2i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4f16_nxv2i16(<vscale x 4 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv2i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4f16.nxv2i64(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv2i64(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4f16_nxv2i64(<vscale x 4 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4f16.nxv2i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4f16_nxv2i64(<vscale x 4 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv2i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4f16.nxv16i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv16i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4f16_nxv16i16(<vscale x 4 x half> %val, half* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv16i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4f16_nxv16i16(<vscale x 4 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv16i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4f16.nxv32i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv32i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 32 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4f16_nxv32i16(<vscale x 4 x half> %val, half* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv32i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4f16_nxv32i16(<vscale x 4 x half> %val, half* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv32i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i32>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i32>, <vscale x 4 x i1>, i64)
 
@@ -93655,130 +16868,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv4f16.nxv16i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv16i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4f16_nxv16i8(<vscale x 4 x half> %val, half* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv16i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4f16_nxv16i8(<vscale x 4 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv16i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4f16.nxv1i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv1i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4f16_nxv1i64(<vscale x 4 x half> %val, half* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv1i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4f16_nxv1i64(<vscale x 4 x half> %val, half* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv1i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4f16.nxv1i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv1i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4f16_nxv1i32(<vscale x 4 x half> %val, half* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv1i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4f16_nxv1i32(<vscale x 4 x half> %val, half* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv1i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4f16.nxv8i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv8i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4f16_nxv8i16(<vscale x 4 x half> %val, half* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv8i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4f16_nxv8i16(<vscale x 4 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv8i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i8>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i8>, <vscale x 4 x i1>, i64)
 
@@ -93810,99 +16899,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv4f16.nxv1i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv1i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4f16_nxv1i16(<vscale x 4 x half> %val, half* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv1i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4f16_nxv1i16(<vscale x 4 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv1i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4f16.nxv2i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv2i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4f16_nxv2i32(<vscale x 4 x half> %val, half* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv2i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4f16_nxv2i32(<vscale x 4 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv2i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4f16.nxv8i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv8i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4f16_nxv8i8(<vscale x 4 x half> %val, half* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv8i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4f16_nxv8i8(<vscale x 4 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv8i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i64>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i64>, <vscale x 4 x i1>, i64)
 
@@ -93934,37 +16930,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv4f16.nxv64i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv64i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 64 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4f16_nxv64i8(<vscale x 4 x half> %val, half* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv64i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4f16_nxv64i8(<vscale x 4 x half> %val, half* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv64i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i16>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i16>, <vscale x 4 x i1>, i64)
 
@@ -93996,320 +16961,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv4f16.nxv8i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv8i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4f16_nxv8i64(<vscale x 4 x half> %val, half* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv8i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4f16_nxv8i64(<vscale x 4 x half> %val, half* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv8i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4f16.nxv1i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv1i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4f16_nxv1i8(<vscale x 4 x half> %val, half* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv1i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4f16_nxv1i8(<vscale x 4 x half> %val, half* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv1i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4f16.nxv2i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv2i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4f16_nxv2i8(<vscale x 4 x half> %val, half* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv2i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4f16_nxv2i8(<vscale x 4 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv2i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4f16.nxv8i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv8i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4f16_nxv8i32(<vscale x 4 x half> %val, half* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv8i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4f16_nxv8i32(<vscale x 4 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv8i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4f16.nxv32i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv32i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 32 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4f16_nxv32i8(<vscale x 4 x half> %val, half* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv32i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4f16_nxv32i8(<vscale x 4 x half> %val, half* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv32i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4f16.nxv16i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv16i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4f16_nxv16i32(<vscale x 4 x half> %val, half* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv16i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4f16_nxv16i32(<vscale x 4 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv16i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4f16.nxv2i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv2i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4f16_nxv2i16(<vscale x 4 x half> %val, half* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv2i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4f16_nxv2i16(<vscale x 4 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv2i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4f16.nxv2i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv2i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4f16_nxv2i64(<vscale x 4 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv2i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4f16_nxv2i64(<vscale x 4 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv2i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4f16.nxv16i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv16i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4f16_nxv16i16(<vscale x 4 x half> %val, half* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4f16.nxv16i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4f16_nxv16i16(<vscale x 4 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv16i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4f16.nxv32i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv32i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 32 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4f16_nxv32i16(<vscale x 4 x half> %val, half* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4f16.nxv32i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4f16_nxv32i16(<vscale x 4 x half> %val, half* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv32i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i32>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i32>, <vscale x 4 x i1>, i64)
 
@@ -94343,138 +16994,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv4f16.nxv16i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv16i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4f16_nxv16i8(<vscale x 4 x half> %val, half* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4f16.nxv16i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4f16_nxv16i8(<vscale x 4 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv16i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4f16.nxv1i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv1i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4f16_nxv1i64(<vscale x 4 x half> %val, half* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4f16.nxv1i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4f16_nxv1i64(<vscale x 4 x half> %val, half* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv1i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4f16.nxv1i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv1i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4f16_nxv1i32(<vscale x 4 x half> %val, half* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4f16.nxv1i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4f16_nxv1i32(<vscale x 4 x half> %val, half* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv1i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4f16.nxv8i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv8i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4f16_nxv8i16(<vscale x 4 x half> %val, half* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4f16.nxv8i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4f16_nxv8i16(<vscale x 4 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv8i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i8>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i8>, <vscale x 4 x i1>, i64)
 
@@ -94508,105 +17027,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv4f16.nxv1i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv1i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4f16_nxv1i16(<vscale x 4 x half> %val, half* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4f16.nxv1i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4f16_nxv1i16(<vscale x 4 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv1i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4f16.nxv2i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv2i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4f16_nxv2i32(<vscale x 4 x half> %val, half* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4f16.nxv2i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4f16_nxv2i32(<vscale x 4 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv2i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4f16.nxv8i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv8i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4f16_nxv8i8(<vscale x 4 x half> %val, half* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4f16.nxv8i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4f16_nxv8i8(<vscale x 4 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv8i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i64>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i64>, <vscale x 4 x i1>, i64)
 
@@ -94640,39 +17060,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv4f16.nxv64i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv64i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 64 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4f16_nxv64i8(<vscale x 4 x half> %val, half* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4f16.nxv64i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4f16_nxv64i8(<vscale x 4 x half> %val, half* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv64i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i16>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i16>, <vscale x 4 x i1>, i64)
 
@@ -94706,340 +17093,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv4f16.nxv8i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv8i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4f16_nxv8i64(<vscale x 4 x half> %val, half* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4f16.nxv8i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4f16_nxv8i64(<vscale x 4 x half> %val, half* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv8i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4f16.nxv1i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv1i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4f16_nxv1i8(<vscale x 4 x half> %val, half* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4f16.nxv1i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4f16_nxv1i8(<vscale x 4 x half> %val, half* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv1i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4f16.nxv2i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv2i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4f16_nxv2i8(<vscale x 4 x half> %val, half* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4f16.nxv2i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4f16_nxv2i8(<vscale x 4 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv2i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4f16.nxv8i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv8i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4f16_nxv8i32(<vscale x 4 x half> %val, half* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4f16.nxv8i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4f16_nxv8i32(<vscale x 4 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv8i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4f16.nxv32i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv32i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 32 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4f16_nxv32i8(<vscale x 4 x half> %val, half* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4f16.nxv32i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4f16_nxv32i8(<vscale x 4 x half> %val, half* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv32i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4f16.nxv16i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv16i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4f16_nxv16i32(<vscale x 4 x half> %val, half* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4f16.nxv16i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4f16_nxv16i32(<vscale x 4 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv16i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4f16.nxv2i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv2i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4f16_nxv2i16(<vscale x 4 x half> %val, half* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4f16.nxv2i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4f16_nxv2i16(<vscale x 4 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv2i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4f16.nxv2i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv2i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4f16_nxv2i64(<vscale x 4 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4f16.nxv2i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4f16_nxv2i64(<vscale x 4 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv2i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv4f16.nxv16i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv16i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg5_nxv4f16_nxv16i16(<vscale x 4 x half> %val, half* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv16i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv4f16_nxv16i16(<vscale x 4 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv16i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv4f16.nxv32i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv32i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 32 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg5_nxv4f16_nxv32i16(<vscale x 4 x half> %val, half* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv32i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv4f16_nxv32i16(<vscale x 4 x half> %val, half* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv32i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i32>, i64)
 declare void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i32>, <vscale x 4 x i1>, i64)
 
@@ -95075,146 +17128,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg5.nxv4f16.nxv16i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv16i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg5_nxv4f16_nxv16i8(<vscale x 4 x half> %val, half* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv16i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv4f16_nxv16i8(<vscale x 4 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv16i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv4f16.nxv1i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv1i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg5_nxv4f16_nxv1i64(<vscale x 4 x half> %val, half* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv1i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv4f16_nxv1i64(<vscale x 4 x half> %val, half* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv1i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv4f16.nxv1i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv1i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg5_nxv4f16_nxv1i32(<vscale x 4 x half> %val, half* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv1i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv4f16_nxv1i32(<vscale x 4 x half> %val, half* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv1i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv4f16.nxv8i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv8i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg5_nxv4f16_nxv8i16(<vscale x 4 x half> %val, half* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv8i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv4f16_nxv8i16(<vscale x 4 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv8i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i8>, i64)
 declare void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i8>, <vscale x 4 x i1>, i64)
 
@@ -95250,111 +17163,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg5.nxv4f16.nxv1i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv1i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg5_nxv4f16_nxv1i16(<vscale x 4 x half> %val, half* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv1i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv4f16_nxv1i16(<vscale x 4 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv1i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv4f16.nxv2i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv2i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg5_nxv4f16_nxv2i32(<vscale x 4 x half> %val, half* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv2i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv4f16_nxv2i32(<vscale x 4 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv2i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv4f16.nxv8i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv8i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg5_nxv4f16_nxv8i8(<vscale x 4 x half> %val, half* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv8i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv4f16_nxv8i8(<vscale x 4 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv8i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i64>, i64)
 declare void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i64>, <vscale x 4 x i1>, i64)
 
@@ -95390,41 +17198,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg5.nxv4f16.nxv64i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv64i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 64 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg5_nxv4f16_nxv64i8(<vscale x 4 x half> %val, half* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv64i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv4f16_nxv64i8(<vscale x 4 x half> %val, half* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv64i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i16>, i64)
 declare void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i16>, <vscale x 4 x i1>, i64)
 
@@ -95460,360 +17233,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg5.nxv4f16.nxv8i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv8i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg5_nxv4f16_nxv8i64(<vscale x 4 x half> %val, half* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv8i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv4f16_nxv8i64(<vscale x 4 x half> %val, half* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv8i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv4f16.nxv1i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv1i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg5_nxv4f16_nxv1i8(<vscale x 4 x half> %val, half* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv1i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv4f16_nxv1i8(<vscale x 4 x half> %val, half* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv1i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv4f16.nxv2i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv2i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg5_nxv4f16_nxv2i8(<vscale x 4 x half> %val, half* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv2i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv4f16_nxv2i8(<vscale x 4 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv2i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv4f16.nxv8i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv8i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg5_nxv4f16_nxv8i32(<vscale x 4 x half> %val, half* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv8i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv4f16_nxv8i32(<vscale x 4 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv8i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv4f16.nxv32i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv32i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 32 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg5_nxv4f16_nxv32i8(<vscale x 4 x half> %val, half* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv32i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv4f16_nxv32i8(<vscale x 4 x half> %val, half* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv32i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv4f16.nxv16i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv16i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg5_nxv4f16_nxv16i32(<vscale x 4 x half> %val, half* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv16i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv4f16_nxv16i32(<vscale x 4 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv16i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv4f16.nxv2i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv2i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg5_nxv4f16_nxv2i16(<vscale x 4 x half> %val, half* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv2i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv4f16_nxv2i16(<vscale x 4 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv2i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv4f16.nxv2i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv2i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg5_nxv4f16_nxv2i64(<vscale x 4 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv2i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv4f16_nxv2i64(<vscale x 4 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv2i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv4f16.nxv16i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv16i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg6_nxv4f16_nxv16i16(<vscale x 4 x half> %val, half* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv4f16.nxv16i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv4f16_nxv16i16(<vscale x 4 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv16i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv4f16.nxv32i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv32i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 32 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg6_nxv4f16_nxv32i16(<vscale x 4 x half> %val, half* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv4f16.nxv32i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv4f16_nxv32i16(<vscale x 4 x half> %val, half* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv32i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i32>, i64)
 declare void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i32>, <vscale x 4 x i1>, i64)
 
@@ -95851,154 +17270,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg6.nxv4f16.nxv16i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv16i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg6_nxv4f16_nxv16i8(<vscale x 4 x half> %val, half* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv4f16.nxv16i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv4f16_nxv16i8(<vscale x 4 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv16i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv4f16.nxv1i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv1i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg6_nxv4f16_nxv1i64(<vscale x 4 x half> %val, half* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv4f16.nxv1i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv4f16_nxv1i64(<vscale x 4 x half> %val, half* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv1i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv4f16.nxv1i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv1i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg6_nxv4f16_nxv1i32(<vscale x 4 x half> %val, half* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv4f16.nxv1i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv4f16_nxv1i32(<vscale x 4 x half> %val, half* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv1i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv4f16.nxv8i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv8i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg6_nxv4f16_nxv8i16(<vscale x 4 x half> %val, half* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv4f16.nxv8i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv4f16_nxv8i16(<vscale x 4 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv8i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i8>, i64)
 declare void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i8>, <vscale x 4 x i1>, i64)
 
@@ -96036,117 +17307,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg6.nxv4f16.nxv1i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv1i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg6_nxv4f16_nxv1i16(<vscale x 4 x half> %val, half* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv4f16.nxv1i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv4f16_nxv1i16(<vscale x 4 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv1i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv4f16.nxv2i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv2i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg6_nxv4f16_nxv2i32(<vscale x 4 x half> %val, half* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv4f16.nxv2i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv4f16_nxv2i32(<vscale x 4 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv2i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv4f16.nxv8i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv8i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg6_nxv4f16_nxv8i8(<vscale x 4 x half> %val, half* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv4f16.nxv8i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv4f16_nxv8i8(<vscale x 4 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv8i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i64>, i64)
 declare void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i64>, <vscale x 4 x i1>, i64)
 
@@ -96184,43 +17344,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg6.nxv4f16.nxv64i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv64i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 64 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg6_nxv4f16_nxv64i8(<vscale x 4 x half> %val, half* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv4f16.nxv64i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv4f16_nxv64i8(<vscale x 4 x half> %val, half* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv64i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i16>, i64)
 declare void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i16>, <vscale x 4 x i1>, i64)
 
@@ -96258,380 +17381,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg6.nxv4f16.nxv8i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv8i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg6_nxv4f16_nxv8i64(<vscale x 4 x half> %val, half* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv4f16.nxv8i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv4f16_nxv8i64(<vscale x 4 x half> %val, half* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv8i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv4f16.nxv1i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv1i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg6_nxv4f16_nxv1i8(<vscale x 4 x half> %val, half* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv4f16.nxv1i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv4f16_nxv1i8(<vscale x 4 x half> %val, half* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv1i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv4f16.nxv2i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv2i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg6_nxv4f16_nxv2i8(<vscale x 4 x half> %val, half* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv4f16.nxv2i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv4f16_nxv2i8(<vscale x 4 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv2i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv4f16.nxv8i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv8i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg6_nxv4f16_nxv8i32(<vscale x 4 x half> %val, half* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv4f16.nxv8i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv4f16_nxv8i32(<vscale x 4 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv8i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv4f16.nxv32i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv32i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 32 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg6_nxv4f16_nxv32i8(<vscale x 4 x half> %val, half* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv4f16.nxv32i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv4f16_nxv32i8(<vscale x 4 x half> %val, half* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv32i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv4f16.nxv16i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv16i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg6_nxv4f16_nxv16i32(<vscale x 4 x half> %val, half* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv4f16.nxv16i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv4f16_nxv16i32(<vscale x 4 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv16i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv4f16.nxv2i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv2i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg6_nxv4f16_nxv2i16(<vscale x 4 x half> %val, half* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv4f16.nxv2i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv4f16_nxv2i16(<vscale x 4 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv2i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv4f16.nxv2i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv2i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg6_nxv4f16_nxv2i64(<vscale x 4 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv4f16.nxv2i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv4f16_nxv2i64(<vscale x 4 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv2i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv4f16.nxv16i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv16i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg7_nxv4f16_nxv16i16(<vscale x 4 x half> %val, half* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv4f16.nxv16i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv4f16_nxv16i16(<vscale x 4 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv16i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv4f16.nxv32i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv32i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 32 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg7_nxv4f16_nxv32i16(<vscale x 4 x half> %val, half* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv4f16.nxv32i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv4f16_nxv32i16(<vscale x 4 x half> %val, half* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv32i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i32>, i64)
 declare void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i32>, <vscale x 4 x i1>, i64)
 
@@ -96671,162 +17420,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg7.nxv4f16.nxv16i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv16i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg7_nxv4f16_nxv16i8(<vscale x 4 x half> %val, half* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv4f16.nxv16i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv4f16_nxv16i8(<vscale x 4 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv16i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv4f16.nxv1i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv1i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg7_nxv4f16_nxv1i64(<vscale x 4 x half> %val, half* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv4f16.nxv1i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv4f16_nxv1i64(<vscale x 4 x half> %val, half* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv1i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv4f16.nxv1i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv1i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg7_nxv4f16_nxv1i32(<vscale x 4 x half> %val, half* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv4f16.nxv1i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv4f16_nxv1i32(<vscale x 4 x half> %val, half* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv1i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv4f16.nxv8i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv8i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg7_nxv4f16_nxv8i16(<vscale x 4 x half> %val, half* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv4f16.nxv8i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv4f16_nxv8i16(<vscale x 4 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv8i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i8>, i64)
 declare void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i8>, <vscale x 4 x i1>, i64)
 
@@ -96866,123 +17459,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg7.nxv4f16.nxv1i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv1i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg7_nxv4f16_nxv1i16(<vscale x 4 x half> %val, half* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv4f16.nxv1i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv4f16_nxv1i16(<vscale x 4 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv1i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv4f16.nxv2i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv2i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg7_nxv4f16_nxv2i32(<vscale x 4 x half> %val, half* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv4f16.nxv2i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv4f16_nxv2i32(<vscale x 4 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv2i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv4f16.nxv8i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv8i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg7_nxv4f16_nxv8i8(<vscale x 4 x half> %val, half* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv4f16.nxv8i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv4f16_nxv8i8(<vscale x 4 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv8i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i64>, i64)
 declare void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i64>, <vscale x 4 x i1>, i64)
 
@@ -97022,45 +17498,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg7.nxv4f16.nxv64i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv64i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 64 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg7_nxv4f16_nxv64i8(<vscale x 4 x half> %val, half* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv4f16.nxv64i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv4f16_nxv64i8(<vscale x 4 x half> %val, half* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv64i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i16>, i64)
 declare void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i16>, <vscale x 4 x i1>, i64)
 
@@ -97100,400 +17537,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg7.nxv4f16.nxv8i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv8i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg7_nxv4f16_nxv8i64(<vscale x 4 x half> %val, half* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv4f16.nxv8i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv4f16_nxv8i64(<vscale x 4 x half> %val, half* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv8i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv4f16.nxv1i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv1i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg7_nxv4f16_nxv1i8(<vscale x 4 x half> %val, half* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv4f16.nxv1i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv4f16_nxv1i8(<vscale x 4 x half> %val, half* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv1i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv4f16.nxv2i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv2i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg7_nxv4f16_nxv2i8(<vscale x 4 x half> %val, half* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv4f16.nxv2i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv4f16_nxv2i8(<vscale x 4 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv2i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv4f16.nxv8i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv8i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg7_nxv4f16_nxv8i32(<vscale x 4 x half> %val, half* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv4f16.nxv8i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv4f16_nxv8i32(<vscale x 4 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv8i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv4f16.nxv32i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv32i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 32 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg7_nxv4f16_nxv32i8(<vscale x 4 x half> %val, half* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv4f16.nxv32i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv4f16_nxv32i8(<vscale x 4 x half> %val, half* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv32i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv4f16.nxv16i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv16i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg7_nxv4f16_nxv16i32(<vscale x 4 x half> %val, half* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv4f16.nxv16i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv4f16_nxv16i32(<vscale x 4 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv16i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv4f16.nxv2i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv2i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg7_nxv4f16_nxv2i16(<vscale x 4 x half> %val, half* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv4f16.nxv2i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv4f16_nxv2i16(<vscale x 4 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv2i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv4f16.nxv2i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv2i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg7_nxv4f16_nxv2i64(<vscale x 4 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv4f16.nxv2i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv4f16_nxv2i64(<vscale x 4 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv2i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv4f16.nxv16i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv16i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg8_nxv4f16_nxv16i16(<vscale x 4 x half> %val, half* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv16i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv4f16_nxv16i16(<vscale x 4 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv16i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv4f16.nxv32i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv32i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 32 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg8_nxv4f16_nxv32i16(<vscale x 4 x half> %val, half* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv32i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv4f16_nxv32i16(<vscale x 4 x half> %val, half* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv32i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i32>, i64)
 declare void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i32>, <vscale x 4 x i1>, i64)
 
@@ -97535,170 +17578,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg8.nxv4f16.nxv16i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv16i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg8_nxv4f16_nxv16i8(<vscale x 4 x half> %val, half* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv16i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv4f16_nxv16i8(<vscale x 4 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv16i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv4f16.nxv1i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv1i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg8_nxv4f16_nxv1i64(<vscale x 4 x half> %val, half* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv1i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv4f16_nxv1i64(<vscale x 4 x half> %val, half* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv1i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv4f16.nxv1i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv1i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg8_nxv4f16_nxv1i32(<vscale x 4 x half> %val, half* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv1i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv4f16_nxv1i32(<vscale x 4 x half> %val, half* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv1i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv4f16.nxv8i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv8i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg8_nxv4f16_nxv8i16(<vscale x 4 x half> %val, half* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv8i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv4f16_nxv8i16(<vscale x 4 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv8i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i8>, i64)
 declare void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i8>, <vscale x 4 x i1>, i64)
 
@@ -97740,129 +17619,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg8.nxv4f16.nxv1i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv1i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg8_nxv4f16_nxv1i16(<vscale x 4 x half> %val, half* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv1i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv4f16_nxv1i16(<vscale x 4 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv1i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv4f16.nxv2i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv2i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg8_nxv4f16_nxv2i32(<vscale x 4 x half> %val, half* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv2i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv4f16_nxv2i32(<vscale x 4 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv2i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv4f16.nxv8i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv8i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg8_nxv4f16_nxv8i8(<vscale x 4 x half> %val, half* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv8i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv4f16_nxv8i8(<vscale x 4 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv8i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i64>, i64)
 declare void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i64>, <vscale x 4 x i1>, i64)
 
@@ -97904,47 +17660,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg8.nxv4f16.nxv64i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv64i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 64 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg8_nxv4f16_nxv64i8(<vscale x 4 x half> %val, half* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv64i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv4f16_nxv64i8(<vscale x 4 x half> %val, half* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv64i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i16>, i64)
 declare void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i16>, <vscale x 4 x i1>, i64)
 
@@ -97986,603 +17701,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg8.nxv4f16.nxv8i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv8i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg8_nxv4f16_nxv8i64(<vscale x 4 x half> %val, half* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv8i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv4f16_nxv8i64(<vscale x 4 x half> %val, half* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv8i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv4f16.nxv1i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv1i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg8_nxv4f16_nxv1i8(<vscale x 4 x half> %val, half* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv1i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv4f16_nxv1i8(<vscale x 4 x half> %val, half* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv1i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv4f16.nxv2i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv2i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg8_nxv4f16_nxv2i8(<vscale x 4 x half> %val, half* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv2i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv4f16_nxv2i8(<vscale x 4 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv2i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv4f16.nxv8i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv8i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg8_nxv4f16_nxv8i32(<vscale x 4 x half> %val, half* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv8i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv4f16_nxv8i32(<vscale x 4 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv8i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv4f16.nxv32i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv32i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 32 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg8_nxv4f16_nxv32i8(<vscale x 4 x half> %val, half* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv32i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv4f16_nxv32i8(<vscale x 4 x half> %val, half* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv32i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv4f16.nxv16i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv16i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg8_nxv4f16_nxv16i32(<vscale x 4 x half> %val, half* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv16i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv4f16_nxv16i32(<vscale x 4 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv16i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv4f16.nxv2i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv2i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg8_nxv4f16_nxv2i16(<vscale x 4 x half> %val, half* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv2i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv4f16_nxv2i16(<vscale x 4 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv2i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv4f16.nxv2i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv2i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg8_nxv4f16_nxv2i64(<vscale x 4 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv4f16.nxv2i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv4f16_nxv2i64(<vscale x 4 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv2i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2f16.nxv16i16(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv16i16(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2f16_nxv16i16(<vscale x 2 x half> %val, half* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2f16.nxv16i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2f16_nxv16i16(<vscale x 2 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv16i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2f16.nxv32i16(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv32i16(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 32 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2f16_nxv32i16(<vscale x 2 x half> %val, half* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2f16.nxv32i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2f16_nxv32i16(<vscale x 2 x half> %val, half* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv32i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2f16.nxv4i32(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv4i32(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2f16_nxv4i32(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2f16.nxv4i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2f16_nxv4i32(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv4i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2f16.nxv16i8(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv16i8(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2f16_nxv16i8(<vscale x 2 x half> %val, half* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2f16.nxv16i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2f16_nxv16i8(<vscale x 2 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv16i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2f16.nxv1i64(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv1i64(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2f16_nxv1i64(<vscale x 2 x half> %val, half* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2f16.nxv1i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2f16_nxv1i64(<vscale x 2 x half> %val, half* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv1i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2f16.nxv1i32(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv1i32(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2f16_nxv1i32(<vscale x 2 x half> %val, half* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2f16.nxv1i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2f16_nxv1i32(<vscale x 2 x half> %val, half* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv1i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2f16.nxv8i16(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv8i16(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2f16_nxv8i16(<vscale x 2 x half> %val, half* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2f16.nxv8i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2f16_nxv8i16(<vscale x 2 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv8i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2f16.nxv4i8(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv4i8(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2f16_nxv4i8(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2f16.nxv4i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2f16_nxv4i8(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv4i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2f16.nxv1i16(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv1i16(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2f16_nxv1i16(<vscale x 2 x half> %val, half* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2f16.nxv1i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2f16_nxv1i16(<vscale x 2 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv1i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i32(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i32>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i32(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
 
@@ -98614,186 +17732,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv2f16.nxv8i8(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv8i8(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2f16_nxv8i8(<vscale x 2 x half> %val, half* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2f16.nxv8i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2f16_nxv8i8(<vscale x 2 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv8i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2f16.nxv4i64(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv4i64(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2f16_nxv4i64(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2f16.nxv4i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2f16_nxv4i64(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv4i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2f16.nxv64i8(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv64i8(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 64 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2f16_nxv64i8(<vscale x 2 x half> %val, half* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2f16.nxv64i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2f16_nxv64i8(<vscale x 2 x half> %val, half* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv64i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2f16.nxv4i16(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv4i16(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2f16_nxv4i16(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2f16.nxv4i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2f16_nxv4i16(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv4i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2f16.nxv8i64(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv8i64(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2f16_nxv8i64(<vscale x 2 x half> %val, half* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2f16.nxv8i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2f16_nxv8i64(<vscale x 2 x half> %val, half* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv8i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2f16.nxv1i8(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv1i8(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2f16_nxv1i8(<vscale x 2 x half> %val, half* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2f16.nxv1i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2f16_nxv1i8(<vscale x 2 x half> %val, half* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v25, v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv1i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i8(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i8>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i8(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i8>, <vscale x 2 x i1>, i64)
 
@@ -98825,93 +17763,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv2f16.nxv8i32(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv8i32(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2f16_nxv8i32(<vscale x 2 x half> %val, half* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2f16.nxv8i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2f16_nxv8i32(<vscale x 2 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv8i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2f16.nxv32i8(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv32i8(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 32 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2f16_nxv32i8(<vscale x 2 x half> %val, half* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2f16.nxv32i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2f16_nxv32i8(<vscale x 2 x half> %val, half* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv32i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv2f16.nxv16i32(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv16i32(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg2_nxv2f16_nxv16i32(<vscale x 2 x half> %val, half* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv2f16.nxv16i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv2f16_nxv16i32(<vscale x 2 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv16i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i16(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i16>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i16(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64)
 
@@ -98972,285 +17823,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv2f16.nxv16i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv16i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2f16_nxv16i16(<vscale x 2 x half> %val, half* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv16i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2f16_nxv16i16(<vscale x 2 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv16i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2f16.nxv32i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv32i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 32 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2f16_nxv32i16(<vscale x 2 x half> %val, half* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv32i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2f16_nxv32i16(<vscale x 2 x half> %val, half* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv32i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2f16.nxv4i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv4i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2f16_nxv4i32(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv4i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2f16_nxv4i32(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv4i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2f16.nxv16i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv16i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2f16_nxv16i8(<vscale x 2 x half> %val, half* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv16i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2f16_nxv16i8(<vscale x 2 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv16i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2f16.nxv1i64(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv1i64(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2f16_nxv1i64(<vscale x 2 x half> %val, half* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv1i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2f16_nxv1i64(<vscale x 2 x half> %val, half* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv1i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2f16.nxv1i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv1i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2f16_nxv1i32(<vscale x 2 x half> %val, half* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv1i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2f16_nxv1i32(<vscale x 2 x half> %val, half* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv1i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2f16.nxv8i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv8i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2f16_nxv8i16(<vscale x 2 x half> %val, half* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv8i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2f16_nxv8i16(<vscale x 2 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv8i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2f16.nxv4i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv4i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2f16_nxv4i8(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv4i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2f16_nxv4i8(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv4i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2f16.nxv1i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv1i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2f16_nxv1i16(<vscale x 2 x half> %val, half* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv1i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2f16_nxv1i16(<vscale x 2 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv1i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i32>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
 
@@ -99282,192 +17854,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv2f16.nxv8i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv8i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2f16_nxv8i8(<vscale x 2 x half> %val, half* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv8i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2f16_nxv8i8(<vscale x 2 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv8i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2f16.nxv4i64(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv4i64(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2f16_nxv4i64(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv4i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2f16_nxv4i64(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv4i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2f16.nxv64i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv64i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 64 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2f16_nxv64i8(<vscale x 2 x half> %val, half* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv64i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2f16_nxv64i8(<vscale x 2 x half> %val, half* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv64i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2f16.nxv4i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv4i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2f16_nxv4i16(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv4i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2f16_nxv4i16(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv4i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2f16.nxv8i64(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv8i64(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2f16_nxv8i64(<vscale x 2 x half> %val, half* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv8i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2f16_nxv8i64(<vscale x 2 x half> %val, half* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv8i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2f16.nxv1i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv1i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2f16_nxv1i8(<vscale x 2 x half> %val, half* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv1i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2f16_nxv1i8(<vscale x 2 x half> %val, half* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv1i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i8>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i8>, <vscale x 2 x i1>, i64)
 
@@ -99499,99 +17885,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv2f16.nxv8i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv8i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2f16_nxv8i32(<vscale x 2 x half> %val, half* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv8i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2f16_nxv8i32(<vscale x 2 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv8i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2f16.nxv32i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv32i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 32 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2f16_nxv32i8(<vscale x 2 x half> %val, half* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv32i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2f16_nxv32i8(<vscale x 2 x half> %val, half* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv32i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv2f16.nxv16i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv16i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg3_nxv2f16_nxv16i32(<vscale x 2 x half> %val, half* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv16i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv2f16_nxv16i32(<vscale x 2 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv16i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i16>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64)
 
@@ -99654,303 +17947,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv2f16.nxv16i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv16i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2f16_nxv16i16(<vscale x 2 x half> %val, half* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2f16.nxv16i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2f16_nxv16i16(<vscale x 2 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv16i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2f16.nxv32i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv32i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 32 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2f16_nxv32i16(<vscale x 2 x half> %val, half* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2f16.nxv32i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2f16_nxv32i16(<vscale x 2 x half> %val, half* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv32i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2f16.nxv4i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv4i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2f16_nxv4i32(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2f16.nxv4i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2f16_nxv4i32(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv4i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2f16.nxv16i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv16i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2f16_nxv16i8(<vscale x 2 x half> %val, half* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2f16.nxv16i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2f16_nxv16i8(<vscale x 2 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv16i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2f16.nxv1i64(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv1i64(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2f16_nxv1i64(<vscale x 2 x half> %val, half* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2f16.nxv1i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2f16_nxv1i64(<vscale x 2 x half> %val, half* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv1i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2f16.nxv1i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv1i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2f16_nxv1i32(<vscale x 2 x half> %val, half* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2f16.nxv1i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2f16_nxv1i32(<vscale x 2 x half> %val, half* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv1i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2f16.nxv8i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv8i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2f16_nxv8i16(<vscale x 2 x half> %val, half* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2f16.nxv8i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2f16_nxv8i16(<vscale x 2 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv8i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2f16.nxv4i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv4i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2f16_nxv4i8(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2f16.nxv4i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2f16_nxv4i8(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv4i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2f16.nxv1i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv1i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2f16_nxv1i16(<vscale x 2 x half> %val, half* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2f16.nxv1i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2f16_nxv1i16(<vscale x 2 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv1i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i32>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
 
@@ -99984,204 +17980,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv2f16.nxv8i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv8i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2f16_nxv8i8(<vscale x 2 x half> %val, half* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2f16.nxv8i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2f16_nxv8i8(<vscale x 2 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv8i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2f16.nxv4i64(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv4i64(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2f16_nxv4i64(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2f16.nxv4i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2f16_nxv4i64(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv4i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2f16.nxv64i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv64i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 64 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2f16_nxv64i8(<vscale x 2 x half> %val, half* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2f16.nxv64i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2f16_nxv64i8(<vscale x 2 x half> %val, half* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv64i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2f16.nxv4i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv4i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2f16_nxv4i16(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2f16.nxv4i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2f16_nxv4i16(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv4i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2f16.nxv8i64(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv8i64(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2f16_nxv8i64(<vscale x 2 x half> %val, half* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2f16.nxv8i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2f16_nxv8i64(<vscale x 2 x half> %val, half* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv8i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2f16.nxv1i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv1i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2f16_nxv1i8(<vscale x 2 x half> %val, half* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2f16.nxv1i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2f16_nxv1i8(<vscale x 2 x half> %val, half* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv1i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i8>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i8>, <vscale x 2 x i1>, i64)
 
@@ -100215,105 +18013,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv2f16.nxv8i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv8i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2f16_nxv8i32(<vscale x 2 x half> %val, half* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2f16.nxv8i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2f16_nxv8i32(<vscale x 2 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv8i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2f16.nxv32i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv32i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 32 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2f16_nxv32i8(<vscale x 2 x half> %val, half* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2f16.nxv32i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2f16_nxv32i8(<vscale x 2 x half> %val, half* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv32i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv2f16.nxv16i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv16i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg4_nxv2f16_nxv16i32(<vscale x 2 x half> %val, half* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv2f16.nxv16i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv2f16_nxv16i32(<vscale x 2 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv16i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i16>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64)
 
@@ -100380,321 +18079,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg5.nxv2f16.nxv16i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv16i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2f16_nxv16i16(<vscale x 2 x half> %val, half* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2f16.nxv16i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2f16_nxv16i16(<vscale x 2 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv16i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2f16.nxv32i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv32i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 32 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2f16_nxv32i16(<vscale x 2 x half> %val, half* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2f16.nxv32i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2f16_nxv32i16(<vscale x 2 x half> %val, half* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv32i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2f16.nxv4i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv4i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2f16_nxv4i32(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2f16.nxv4i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2f16_nxv4i32(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv4i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2f16.nxv16i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv16i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2f16_nxv16i8(<vscale x 2 x half> %val, half* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2f16.nxv16i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2f16_nxv16i8(<vscale x 2 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv16i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2f16.nxv1i64(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv1i64(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2f16_nxv1i64(<vscale x 2 x half> %val, half* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2f16.nxv1i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2f16_nxv1i64(<vscale x 2 x half> %val, half* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv1i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2f16.nxv1i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv1i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2f16_nxv1i32(<vscale x 2 x half> %val, half* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2f16.nxv1i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2f16_nxv1i32(<vscale x 2 x half> %val, half* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv1i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2f16.nxv8i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv8i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2f16_nxv8i16(<vscale x 2 x half> %val, half* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2f16.nxv8i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2f16_nxv8i16(<vscale x 2 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv8i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2f16.nxv4i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv4i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2f16_nxv4i8(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2f16.nxv4i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2f16_nxv4i8(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv4i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2f16.nxv1i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv1i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2f16_nxv1i16(<vscale x 2 x half> %val, half* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2f16.nxv1i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2f16_nxv1i16(<vscale x 2 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv1i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i32>, i64)
 declare void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
 
@@ -100730,216 +18114,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg5.nxv2f16.nxv8i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv8i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2f16_nxv8i8(<vscale x 2 x half> %val, half* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2f16.nxv8i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2f16_nxv8i8(<vscale x 2 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv8i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2f16.nxv4i64(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv4i64(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2f16_nxv4i64(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2f16.nxv4i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2f16_nxv4i64(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv4i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2f16.nxv64i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv64i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 64 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2f16_nxv64i8(<vscale x 2 x half> %val, half* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2f16.nxv64i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2f16_nxv64i8(<vscale x 2 x half> %val, half* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv64i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2f16.nxv4i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv4i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2f16_nxv4i16(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2f16.nxv4i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2f16_nxv4i16(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv4i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2f16.nxv8i64(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv8i64(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2f16_nxv8i64(<vscale x 2 x half> %val, half* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2f16.nxv8i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2f16_nxv8i64(<vscale x 2 x half> %val, half* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv8i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2f16.nxv1i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv1i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2f16_nxv1i8(<vscale x 2 x half> %val, half* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2f16.nxv1i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2f16_nxv1i8(<vscale x 2 x half> %val, half* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv1i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i8>, i64)
 declare void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i8>, <vscale x 2 x i1>, i64)
 
@@ -100975,111 +18149,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg5.nxv2f16.nxv8i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv8i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2f16_nxv8i32(<vscale x 2 x half> %val, half* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2f16.nxv8i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2f16_nxv8i32(<vscale x 2 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv8i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2f16.nxv32i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv32i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 32 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2f16_nxv32i8(<vscale x 2 x half> %val, half* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2f16.nxv32i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2f16_nxv32i8(<vscale x 2 x half> %val, half* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv32i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg5.nxv2f16.nxv16i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv16i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg5_nxv2f16_nxv16i32(<vscale x 2 x half> %val, half* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.nxv2f16.nxv16i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg5_mask_nxv2f16_nxv16i32(<vscale x 2 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg5ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv16i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i16>, i64)
 declare void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64)
 
@@ -101150,339 +18219,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg6.nxv2f16.nxv16i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv16i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2f16_nxv16i16(<vscale x 2 x half> %val, half* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2f16.nxv16i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2f16_nxv16i16(<vscale x 2 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv16i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2f16.nxv32i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv32i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 32 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2f16_nxv32i16(<vscale x 2 x half> %val, half* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2f16.nxv32i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2f16_nxv32i16(<vscale x 2 x half> %val, half* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv32i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2f16.nxv4i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv4i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2f16_nxv4i32(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2f16.nxv4i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2f16_nxv4i32(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv4i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2f16.nxv16i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv16i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2f16_nxv16i8(<vscale x 2 x half> %val, half* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2f16.nxv16i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2f16_nxv16i8(<vscale x 2 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv16i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2f16.nxv1i64(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv1i64(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2f16_nxv1i64(<vscale x 2 x half> %val, half* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2f16.nxv1i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2f16_nxv1i64(<vscale x 2 x half> %val, half* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv1i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2f16.nxv1i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv1i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2f16_nxv1i32(<vscale x 2 x half> %val, half* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2f16.nxv1i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2f16_nxv1i32(<vscale x 2 x half> %val, half* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv1i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2f16.nxv8i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv8i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2f16_nxv8i16(<vscale x 2 x half> %val, half* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2f16.nxv8i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2f16_nxv8i16(<vscale x 2 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv8i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2f16.nxv4i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv4i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2f16_nxv4i8(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2f16.nxv4i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2f16_nxv4i8(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv4i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2f16.nxv1i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv1i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2f16_nxv1i16(<vscale x 2 x half> %val, half* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2f16.nxv1i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2f16_nxv1i16(<vscale x 2 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv1i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i32>, i64)
 declare void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
 
@@ -101520,228 +18256,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg6.nxv2f16.nxv8i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv8i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2f16_nxv8i8(<vscale x 2 x half> %val, half* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2f16.nxv8i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2f16_nxv8i8(<vscale x 2 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv8i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2f16.nxv4i64(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv4i64(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2f16_nxv4i64(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2f16.nxv4i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2f16_nxv4i64(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv4i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2f16.nxv64i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv64i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 64 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2f16_nxv64i8(<vscale x 2 x half> %val, half* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2f16.nxv64i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2f16_nxv64i8(<vscale x 2 x half> %val, half* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv64i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2f16.nxv4i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv4i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2f16_nxv4i16(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2f16.nxv4i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2f16_nxv4i16(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv4i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2f16.nxv8i64(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv8i64(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2f16_nxv8i64(<vscale x 2 x half> %val, half* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2f16.nxv8i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2f16_nxv8i64(<vscale x 2 x half> %val, half* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv8i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2f16.nxv1i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv1i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2f16_nxv1i8(<vscale x 2 x half> %val, half* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2f16.nxv1i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2f16_nxv1i8(<vscale x 2 x half> %val, half* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv1i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i8>, i64)
 declare void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i8>, <vscale x 2 x i1>, i64)
 
@@ -101779,117 +18293,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg6.nxv2f16.nxv8i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv8i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2f16_nxv8i32(<vscale x 2 x half> %val, half* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2f16.nxv8i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2f16_nxv8i32(<vscale x 2 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv8i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2f16.nxv32i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv32i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 32 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2f16_nxv32i8(<vscale x 2 x half> %val, half* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2f16.nxv32i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2f16_nxv32i8(<vscale x 2 x half> %val, half* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv32i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg6.nxv2f16.nxv16i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv16i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg6_nxv2f16_nxv16i32(<vscale x 2 x half> %val, half* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.nxv2f16.nxv16i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg6_mask_nxv2f16_nxv16i32(<vscale x 2 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg6ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv16i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i16>, i64)
 declare void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64)
 
@@ -101964,357 +18367,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg7.nxv2f16.nxv16i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv16i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2f16_nxv16i16(<vscale x 2 x half> %val, half* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2f16.nxv16i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2f16_nxv16i16(<vscale x 2 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv16i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2f16.nxv32i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv32i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 32 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2f16_nxv32i16(<vscale x 2 x half> %val, half* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2f16.nxv32i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2f16_nxv32i16(<vscale x 2 x half> %val, half* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv32i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2f16.nxv4i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv4i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2f16_nxv4i32(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2f16.nxv4i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2f16_nxv4i32(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv4i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2f16.nxv16i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv16i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2f16_nxv16i8(<vscale x 2 x half> %val, half* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2f16.nxv16i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2f16_nxv16i8(<vscale x 2 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv16i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2f16.nxv1i64(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv1i64(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2f16_nxv1i64(<vscale x 2 x half> %val, half* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2f16.nxv1i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2f16_nxv1i64(<vscale x 2 x half> %val, half* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv1i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2f16.nxv1i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv1i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2f16_nxv1i32(<vscale x 2 x half> %val, half* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2f16.nxv1i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2f16_nxv1i32(<vscale x 2 x half> %val, half* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv1i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2f16.nxv8i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv8i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2f16_nxv8i16(<vscale x 2 x half> %val, half* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2f16.nxv8i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2f16_nxv8i16(<vscale x 2 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv8i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2f16.nxv4i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv4i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2f16_nxv4i8(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2f16.nxv4i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2f16_nxv4i8(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv4i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2f16.nxv1i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv1i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2f16_nxv1i16(<vscale x 2 x half> %val, half* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2f16.nxv1i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2f16_nxv1i16(<vscale x 2 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv1i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i32>, i64)
 declare void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
 
@@ -102354,240 +18406,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg7.nxv2f16.nxv8i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv8i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2f16_nxv8i8(<vscale x 2 x half> %val, half* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2f16.nxv8i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2f16_nxv8i8(<vscale x 2 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv8i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2f16.nxv4i64(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv4i64(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2f16_nxv4i64(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2f16.nxv4i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2f16_nxv4i64(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv4i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2f16.nxv64i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv64i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 64 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2f16_nxv64i8(<vscale x 2 x half> %val, half* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2f16.nxv64i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2f16_nxv64i8(<vscale x 2 x half> %val, half* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv64i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2f16.nxv4i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv4i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2f16_nxv4i16(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2f16.nxv4i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2f16_nxv4i16(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv4i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2f16.nxv8i64(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv8i64(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2f16_nxv8i64(<vscale x 2 x half> %val, half* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2f16.nxv8i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2f16_nxv8i64(<vscale x 2 x half> %val, half* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv8i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2f16.nxv1i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv1i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2f16_nxv1i8(<vscale x 2 x half> %val, half* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2f16.nxv1i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2f16_nxv1i8(<vscale x 2 x half> %val, half* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv1i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i8>, i64)
 declare void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i8>, <vscale x 2 x i1>, i64)
 
@@ -102627,123 +18445,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg7.nxv2f16.nxv8i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv8i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2f16_nxv8i32(<vscale x 2 x half> %val, half* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2f16.nxv8i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2f16_nxv8i32(<vscale x 2 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv8i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2f16.nxv32i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv32i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 32 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2f16_nxv32i8(<vscale x 2 x half> %val, half* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2f16.nxv32i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2f16_nxv32i8(<vscale x 2 x half> %val, half* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv32i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg7.nxv2f16.nxv16i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv16i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg7_nxv2f16_nxv16i32(<vscale x 2 x half> %val, half* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.nxv2f16.nxv16i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg7_mask_nxv2f16_nxv16i32(<vscale x 2 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg7ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv16i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i16>, i64)
 declare void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64)
 
@@ -102822,375 +18523,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg8.nxv2f16.nxv16i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv16i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2f16_nxv16i16(<vscale x 2 x half> %val, half* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2f16.nxv16i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2f16_nxv16i16(<vscale x 2 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv16i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2f16.nxv32i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv32i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 32 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2f16_nxv32i16(<vscale x 2 x half> %val, half* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2f16.nxv32i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2f16_nxv32i16(<vscale x 2 x half> %val, half* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv32i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2f16.nxv4i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv4i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2f16_nxv4i32(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2f16.nxv4i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 4 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2f16_nxv4i32(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv4i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2f16.nxv16i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv16i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2f16_nxv16i8(<vscale x 2 x half> %val, half* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2f16.nxv16i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2f16_nxv16i8(<vscale x 2 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv16i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2f16.nxv1i64(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv1i64(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2f16_nxv1i64(<vscale x 2 x half> %val, half* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2f16.nxv1i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2f16_nxv1i64(<vscale x 2 x half> %val, half* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv1i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2f16.nxv1i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv1i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2f16_nxv1i32(<vscale x 2 x half> %val, half* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2f16.nxv1i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2f16_nxv1i32(<vscale x 2 x half> %val, half* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv1i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2f16.nxv8i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv8i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2f16_nxv8i16(<vscale x 2 x half> %val, half* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2f16.nxv8i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2f16_nxv8i16(<vscale x 2 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv8i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2f16.nxv4i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv4i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2f16_nxv4i8(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2f16.nxv4i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 4 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2f16_nxv4i8(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv4i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2f16.nxv1i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv1i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2f16_nxv1i16(<vscale x 2 x half> %val, half* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2f16.nxv1i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2f16_nxv1i16(<vscale x 2 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv1i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i32>, i64)
 declare void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
 
@@ -103232,252 +18564,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg8.nxv2f16.nxv8i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv8i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2f16_nxv8i8(<vscale x 2 x half> %val, half* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2f16.nxv8i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2f16_nxv8i8(<vscale x 2 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv8i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2f16.nxv4i64(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i64>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv4i64(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2f16_nxv4i64(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2f16.nxv4i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 4 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2f16_nxv4i64(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv4i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2f16.nxv64i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv64i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 64 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2f16_nxv64i8(<vscale x 2 x half> %val, half* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2f16.nxv64i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2f16_nxv64i8(<vscale x 2 x half> %val, half* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv64i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2f16.nxv4i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i16>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv4i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i16>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2f16_nxv4i16(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2f16.nxv4i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 4 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2f16_nxv4i16(<vscale x 2 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei16.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv4i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2f16.nxv8i64(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv8i64(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i64>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2f16_nxv8i64(<vscale x 2 x half> %val, half* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2f16.nxv8i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2f16_nxv8i64(<vscale x 2 x half> %val, half* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv8i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 8 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2f16.nxv1i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv1i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2f16_nxv1i8(<vscale x 2 x half> %val, half* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2f16.nxv1i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2f16_nxv1i8(<vscale x 2 x half> %val, half* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv1i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i8>, i64)
 declare void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i8>, <vscale x 2 x i1>, i64)
 
@@ -103519,129 +18605,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg8.nxv2f16.nxv8i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv8i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2f16_nxv8i32(<vscale x 2 x half> %val, half* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2f16.nxv8i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2f16_nxv8i32(<vscale x 2 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv8i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2f16.nxv32i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv32i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 32 x i8>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2f16_nxv32i8(<vscale x 2 x half> %val, half* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    vmv1r.v v2, v0
-; CHECK-NEXT:    vmv1r.v v3, v0
-; CHECK-NEXT:    vmv1r.v v4, v0
-; CHECK-NEXT:    vmv1r.v v5, v0
-; CHECK-NEXT:    vmv1r.v v6, v0
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2f16.nxv32i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2f16_nxv32i8(<vscale x 2 x half> %val, half* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v1, v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei8.v v1, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv32i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg8.nxv2f16.nxv16i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv16i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i32>, <vscale x 2 x i1>, i64)
-
-define void @test_vsuxseg8_nxv2f16_nxv16i32(<vscale x 2 x half> %val, half* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.nxv2f16.nxv16i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg8_mask_nxv2f16_nxv16i32(<vscale x 2 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    vmv1r.v v9, v8
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v8
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v8
-; CHECK-NEXT:    vmv1r.v v14, v8
-; CHECK-NEXT:    vmv1r.v v15, v8
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vsuxseg8ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv16i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i16>, i64)
 declare void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64)
 
@@ -103724,64 +18687,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv4f32.nxv16i16(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv16i16(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 16 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4f32_nxv16i16(<vscale x 4 x float> %val, float* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4f32.nxv16i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4f32_nxv16i16(<vscale x 4 x float> %val, float* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv16i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4f32.nxv32i16(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv32i16(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 32 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4f32_nxv32i16(<vscale x 4 x float> %val, float* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4f32.nxv32i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4f32_nxv32i16(<vscale x 4 x float> %val, float* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv32i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i32(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 4 x i32>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i32(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 4 x i32>, <vscale x 4 x i1>, i64)
 
@@ -103813,130 +18718,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv4f32.nxv16i8(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv16i8(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 16 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4f32_nxv16i8(<vscale x 4 x float> %val, float* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v26, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v26
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4f32.nxv16i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4f32_nxv16i8(<vscale x 4 x float> %val, float* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v26, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v26, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv16i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4f32.nxv1i64(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv1i64(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 1 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4f32_nxv1i64(<vscale x 4 x float> %val, float* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4f32.nxv1i64(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4f32_nxv1i64(<vscale x 4 x float> %val, float* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv1i64(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4f32.nxv1i32(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv1i32(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 1 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4f32_nxv1i32(<vscale x 4 x float> %val, float* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4f32.nxv1i32(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4f32_nxv1i32(<vscale x 4 x float> %val, float* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv1i32(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4f32.nxv8i16(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv8i16(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 8 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4f32_nxv8i16(<vscale x 4 x float> %val, float* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v26, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v26
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4f32.nxv8i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4f32_nxv8i16(<vscale x 4 x float> %val, float* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v26, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v26, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv8i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i8(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 4 x i8>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i8(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 4 x i8>, <vscale x 4 x i1>, i64)
 
@@ -103968,99 +18749,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv4f32.nxv1i16(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv1i16(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 1 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4f32_nxv1i16(<vscale x 4 x float> %val, float* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4f32.nxv1i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4f32_nxv1i16(<vscale x 4 x float> %val, float* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv1i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4f32.nxv2i32(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv2i32(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 2 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4f32_nxv2i32(<vscale x 4 x float> %val, float* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4f32.nxv2i32(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4f32_nxv2i32(<vscale x 4 x float> %val, float* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv2i32(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4f32.nxv8i8(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv8i8(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 8 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4f32_nxv8i8(<vscale x 4 x float> %val, float* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4f32.nxv8i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4f32_nxv8i8(<vscale x 4 x float> %val, float* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv8i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i64(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 4 x i64>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i64(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 4 x i64>, <vscale x 4 x i1>, i64)
 
@@ -104090,35 +18778,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv4f32.nxv64i8(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv64i8(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 64 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4f32_nxv64i8(<vscale x 4 x float> %val, float* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4f32.nxv64i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4f32_nxv64i8(<vscale x 4 x float> %val, float* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv64i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i16(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 4 x i16>, i64)
 declare void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i16(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 4 x i16>, <vscale x 4 x i1>, i64)
 
@@ -104150,308 +18809,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg2.nxv4f32.nxv8i64(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv8i64(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 8 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4f32_nxv8i64(<vscale x 4 x float> %val, float* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4f32.nxv8i64(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4f32_nxv8i64(<vscale x 4 x float> %val, float* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv8i64(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4f32.nxv1i8(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv1i8(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 1 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4f32_nxv1i8(<vscale x 4 x float> %val, float* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4f32.nxv1i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4f32_nxv1i8(<vscale x 4 x float> %val, float* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv1i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4f32.nxv2i8(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv2i8(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 2 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4f32_nxv2i8(<vscale x 4 x float> %val, float* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4f32.nxv2i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4f32_nxv2i8(<vscale x 4 x float> %val, float* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv2i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4f32.nxv8i32(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv8i32(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 8 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4f32_nxv8i32(<vscale x 4 x float> %val, float* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4f32.nxv8i32(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4f32_nxv8i32(<vscale x 4 x float> %val, float* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv8i32(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4f32.nxv32i8(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv32i8(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 32 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4f32_nxv32i8(<vscale x 4 x float> %val, float* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4f32.nxv32i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4f32_nxv32i8(<vscale x 4 x float> %val, float* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv32i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4f32.nxv16i32(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv16i32(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 16 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4f32_nxv16i32(<vscale x 4 x float> %val, float* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4f32.nxv16i32(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4f32_nxv16i32(<vscale x 4 x float> %val, float* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv16i32(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4f32.nxv2i16(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv2i16(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 2 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4f32_nxv2i16(<vscale x 4 x float> %val, float* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4f32.nxv2i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4f32_nxv2i16(<vscale x 4 x float> %val, float* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv1r.v v25, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v25, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv2i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg2.nxv4f32.nxv2i64(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv2i64(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 2 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg2_nxv4f32_nxv2i64(<vscale x 4 x float> %val, float* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v26, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v26
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.nxv4f32.nxv2i64(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg2_mask_nxv4f32_nxv2i64(<vscale x 4 x float> %val, float* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
-; CHECK-NEXT:    vmv2r.v v26, v10
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v26, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv2i64(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4f32.nxv16i16(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv16i16(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 16 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4f32_nxv16i16(<vscale x 4 x float> %val, float* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv16i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4f32_nxv16i16(<vscale x 4 x float> %val, float* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v2, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv16i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4f32.nxv32i16(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv32i16(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 32 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4f32_nxv32i16(<vscale x 4 x float> %val, float* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv32i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4f32_nxv32i16(<vscale x 4 x float> %val, float* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv32i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 4 x i32>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 4 x i32>, <vscale x 4 x i1>, i64)
 
@@ -104483,130 +18840,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv4f32.nxv16i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv16i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 16 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4f32_nxv16i8(<vscale x 4 x float> %val, float* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv16i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4f32_nxv16i8(<vscale x 4 x float> %val, float* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv16i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4f32.nxv1i64(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv1i64(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 1 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4f32_nxv1i64(<vscale x 4 x float> %val, float* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv1i64(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4f32_nxv1i64(<vscale x 4 x float> %val, float* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv1i64(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4f32.nxv1i32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv1i32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 1 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4f32_nxv1i32(<vscale x 4 x float> %val, float* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv1i32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4f32_nxv1i32(<vscale x 4 x float> %val, float* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv1i32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4f32.nxv8i16(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv8i16(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 8 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4f32_nxv8i16(<vscale x 4 x float> %val, float* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv8i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4f32_nxv8i16(<vscale x 4 x float> %val, float* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv8i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 4 x i8>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 4 x i8>, <vscale x 4 x i1>, i64)
 
@@ -104638,99 +18871,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv4f32.nxv1i16(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv1i16(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 1 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4f32_nxv1i16(<vscale x 4 x float> %val, float* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv1i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4f32_nxv1i16(<vscale x 4 x float> %val, float* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv1i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4f32.nxv2i32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv2i32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 2 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4f32_nxv2i32(<vscale x 4 x float> %val, float* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv2i32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4f32_nxv2i32(<vscale x 4 x float> %val, float* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv2i32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4f32.nxv8i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv8i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 8 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4f32_nxv8i8(<vscale x 4 x float> %val, float* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv8i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4f32_nxv8i8(<vscale x 4 x float> %val, float* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv8i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i64(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 4 x i64>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i64(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 4 x i64>, <vscale x 4 x i1>, i64)
 
@@ -104762,37 +18902,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv4f32.nxv64i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv64i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 64 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4f32_nxv64i8(<vscale x 4 x float> %val, float* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv64i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4f32_nxv64i8(<vscale x 4 x float> %val, float* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv64i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i16(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 4 x i16>, i64)
 declare void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i16(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 4 x i16>, <vscale x 4 x i1>, i64)
 
@@ -104824,320 +18933,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg3.nxv4f32.nxv8i64(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv8i64(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 8 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4f32_nxv8i64(<vscale x 4 x float> %val, float* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv8i64(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4f32_nxv8i64(<vscale x 4 x float> %val, float* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv8i64(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4f32.nxv1i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv1i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 1 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4f32_nxv1i8(<vscale x 4 x float> %val, float* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv1i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4f32_nxv1i8(<vscale x 4 x float> %val, float* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv1i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4f32.nxv2i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv2i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 2 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4f32_nxv2i8(<vscale x 4 x float> %val, float* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv2i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4f32_nxv2i8(<vscale x 4 x float> %val, float* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv2i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4f32.nxv8i32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv8i32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 8 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4f32_nxv8i32(<vscale x 4 x float> %val, float* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv8i32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4f32_nxv8i32(<vscale x 4 x float> %val, float* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v2, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv8i32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4f32.nxv32i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv32i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 32 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4f32_nxv32i8(<vscale x 4 x float> %val, float* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv32i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4f32_nxv32i8(<vscale x 4 x float> %val, float* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei8.v v2, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv32i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4f32.nxv16i32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv16i32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 16 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4f32_nxv16i32(<vscale x 4 x float> %val, float* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv16i32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4f32_nxv16i32(<vscale x 4 x float> %val, float* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv16i32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4f32.nxv2i16(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv2i16(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 2 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4f32_nxv2i16(<vscale x 4 x float> %val, float* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv2i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4f32_nxv2i16(<vscale x 4 x float> %val, float* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv2i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg3.nxv4f32.nxv2i64(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv2i64(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 2 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg3_nxv4f32_nxv2i64(<vscale x 4 x float> %val, float* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv2i64(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg3_mask_nxv4f32_nxv2i64(<vscale x 4 x float> %val, float* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg3ei64.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv2i64(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4f32.nxv16i16(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 16 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv16i16(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 16 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4f32_nxv16i16(<vscale x 4 x float> %val, float* %base, <vscale x 16 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4f32.nxv16i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 16 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4f32_nxv16i16(<vscale x 4 x float> %val, float* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v2, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv16i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4f32.nxv32i16(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 32 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv32i16(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 32 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4f32_nxv32i16(<vscale x 4 x float> %val, float* %base, <vscale x 32 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4f32.nxv32i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 32 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4f32_nxv32i16(<vscale x 4 x float> %val, float* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv32i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 4 x i32>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 4 x i32>, <vscale x 4 x i1>, i64)
 
@@ -105171,138 +18966,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv4f32.nxv16i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 16 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv16i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 16 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4f32_nxv16i8(<vscale x 4 x float> %val, float* %base, <vscale x 16 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4f32.nxv16i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 16 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4f32_nxv16i8(<vscale x 4 x float> %val, float* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv16i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4f32.nxv1i64(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 1 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv1i64(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 1 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4f32_nxv1i64(<vscale x 4 x float> %val, float* %base, <vscale x 1 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4f32.nxv1i64(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 1 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4f32_nxv1i64(<vscale x 4 x float> %val, float* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv1i64(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4f32.nxv1i32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 1 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv1i32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 1 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4f32_nxv1i32(<vscale x 4 x float> %val, float* %base, <vscale x 1 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4f32.nxv1i32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 1 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4f32_nxv1i32(<vscale x 4 x float> %val, float* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv1i32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4f32.nxv8i16(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 8 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv8i16(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 8 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4f32_nxv8i16(<vscale x 4 x float> %val, float* %base, <vscale x 8 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4f32.nxv8i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 8 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4f32_nxv8i16(<vscale x 4 x float> %val, float* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv8i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 4 x i8>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 4 x i8>, <vscale x 4 x i1>, i64)
 
@@ -105336,105 +18999,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv4f32.nxv1i16(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 1 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv1i16(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 1 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4f32_nxv1i16(<vscale x 4 x float> %val, float* %base, <vscale x 1 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4f32.nxv1i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 1 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4f32_nxv1i16(<vscale x 4 x float> %val, float* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv1i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4f32.nxv2i32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 2 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv2i32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 2 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4f32_nxv2i32(<vscale x 4 x float> %val, float* %base, <vscale x 2 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4f32.nxv2i32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 2 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4f32_nxv2i32(<vscale x 4 x float> %val, float* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv2i32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4f32.nxv8i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 8 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv8i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 8 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4f32_nxv8i8(<vscale x 4 x float> %val, float* %base, <vscale x 8 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4f32.nxv8i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 8 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4f32_nxv8i8(<vscale x 4 x float> %val, float* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv8i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i64(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 4 x i64>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i64(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 4 x i64>, <vscale x 4 x i1>, i64)
 
@@ -105468,39 +19032,6 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv4f32.nxv64i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 64 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv64i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 64 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4f32_nxv64i8(<vscale x 4 x float> %val, float* %base, <vscale x 64 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4f32.nxv64i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 64 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4f32_nxv64i8(<vscale x 4 x float> %val, float* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv64i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
 declare void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i16(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 4 x i16>, i64)
 declare void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i16(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 4 x i16>, <vscale x 4 x i1>, i64)
 
@@ -105534,267 +19065,3 @@
   ret void
 }
 
-declare void @llvm.riscv.vsuxseg4.nxv4f32.nxv8i64(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 8 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv8i64(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 8 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4f32_nxv8i64(<vscale x 4 x float> %val, float* %base, <vscale x 8 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4f32.nxv8i64(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 8 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4f32_nxv8i64(<vscale x 4 x float> %val, float* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv8i64(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 8 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4f32.nxv1i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 1 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv1i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 1 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4f32_nxv1i8(<vscale x 4 x float> %val, float* %base, <vscale x 1 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4f32.nxv1i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 1 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4f32_nxv1i8(<vscale x 4 x float> %val, float* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv1i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4f32.nxv2i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 2 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv2i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 2 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4f32_nxv2i8(<vscale x 4 x float> %val, float* %base, <vscale x 2 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4f32.nxv2i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 2 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4f32_nxv2i8(<vscale x 4 x float> %val, float* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv2i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4f32.nxv8i32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 8 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv8i32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 8 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4f32_nxv8i32(<vscale x 4 x float> %val, float* %base, <vscale x 8 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4f32.nxv8i32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 8 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4f32_nxv8i32(<vscale x 4 x float> %val, float* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v2, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv8i32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4f32.nxv32i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 32 x i8>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv32i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 32 x i8>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4f32_nxv32i8(<vscale x 4 x float> %val, float* %base, <vscale x 32 x i8> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v0, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4f32.nxv32i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 32 x i8> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4f32_nxv32i8(<vscale x 4 x float> %val, float* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei8.v v2, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv32i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4f32.nxv16i32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 16 x i32>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv16i32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 16 x i32>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4f32_nxv16i32(<vscale x 4 x float> %val, float* %base, <vscale x 16 x i32> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4f32.nxv16i32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 16 x i32> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4f32_nxv16i32(<vscale x 4 x float> %val, float* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
-; CHECK-NEXT:    vmv2r.v v10, v8
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v8
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv16i32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4f32.nxv2i16(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 2 x i16>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv2i16(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 2 x i16>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4f32_nxv2i16(<vscale x 4 x float> %val, float* %base, <vscale x 2 x i16> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4f32.nxv2i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 2 x i16> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4f32_nxv2i16(<vscale x 4 x float> %val, float* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei16.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv2i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-
-declare void @llvm.riscv.vsuxseg4.nxv4f32.nxv2i64(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 2 x i64>, i64)
-declare void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv2i64(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 2 x i64>, <vscale x 4 x i1>, i64)
-
-define void @test_vsuxseg4_nxv4f32_nxv2i64(<vscale x 4 x float> %val, float* %base, <vscale x 2 x i64> %index, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v0, v8
-; CHECK-NEXT:    vmv2r.v v2, v0
-; CHECK-NEXT:    vmv2r.v v4, v0
-; CHECK-NEXT:    vmv2r.v v6, v0
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v0, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.nxv4f32.nxv2i64(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 2 x i64> %index, i64 %vl)
-  ret void
-}
-
-define void @test_vsuxseg4_mask_nxv4f32_nxv2i64(<vscale x 4 x float> %val, float* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
-; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v2, v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vsuxseg4ei64.v v2, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  tail call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv2i64(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
-  ret void
-}
-